author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/net/ethernet/hisilicon
parent     Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/hisilicon')
-rw-r--r--  drivers/net/ethernet/hisilicon/Kconfig | 140
-rw-r--r--  drivers/net/ethernet/hisilicon/Makefile | 11
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c | 1056
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c | 978
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1333
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/Makefile | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.c | 470
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hnae.h | 713
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c | 1025
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 760
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h | 41
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c | 1256
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h | 471
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c | 3158
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h | 468
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c | 771
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h | 35
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 644
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h | 119
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c | 1119
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h | 164
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h | 1095
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c | 837
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c | 2446
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.h | 95
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_ethtool.c | 1304
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/Makefile | 16
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h | 176
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.c | 392
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hnae3.h | 767
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c | 102
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c | 479
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.c | 4756
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_enet.h | 614
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c | 1579
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3_trace.h | 139
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile | 12
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c | 537
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h | 1144
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 507
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h | 15
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 1557
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h | 726
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c | 2009
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h | 136
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 11550
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h | 1017
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c | 867
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c | 272
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h | 13
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 1505
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h | 178
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h | 87
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile | 10
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c | 488
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h | 317
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c | 3739
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h | 351
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c | 351
-rw-r--r--  drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h | 87
-rw-r--r--  drivers/net/ethernet/hisilicon/hns_mdio.c | 574
62 files changed, 57603 insertions, 0 deletions
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 000000000..44f9279cd
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,140 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# HISILICON device configuration
+#
+
+config NET_VENDOR_HISILICON
+ bool "Hisilicon devices"
+ default y
+ depends on OF || ACPI
+ depends on ARM || ARM64 || COMPILE_TEST
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Hisilicon devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_HISILICON
+
+config HIX5HD2_GMAC
+ tristate "Hisilicon HIX5HD2 Family Network Device Support"
+ select PHYLIB
+ help
+ This selects the hix5hd2 mac family network device.
+
+config HISI_FEMAC
+ tristate "Hisilicon Fast Ethernet MAC device support"
+ depends on HAS_IOMEM
+ select PHYLIB
+ select RESET_CONTROLLER
+ help
+ This selects the Hisilicon Fast Ethernet MAC device (FEMAC).
+ The FEMAC receives and transmits data over Ethernet
+ ports at 10/100 Mbps in full-duplex or half-duplex mode.
+ The FEMAC exchanges data with the CPU, and supports
+ Energy Efficient Ethernet (EEE).
+
+config HIP04_ETH
+ tristate "HISILICON P04 Ethernet support"
+ depends on HAS_IOMEM # For MFD_SYSCON
+ select MARVELL_PHY
+ select MFD_SYSCON
+ select HNS_MDIO
+ help
+ If you wish to compile a kernel for hardware with the Hisilicon P04 SoC and
+ want to use the internal Ethernet, then you should answer Y here.
+
+config HI13X1_GMAC
+ bool "Hisilicon HI13X1 Network Device Support"
+ depends on HIP04_ETH
+ help
+ If you wish to compile a kernel for hardware with the Hisilicon hi13x1_gmac,
+ then you should answer Y here. This makes this driver suitable for use
+ on certain boards such as the HI13X1.
+
+ If you are unsure, say N.
+
+config HNS_MDIO
+ tristate
+ select PHYLIB
+ help
+ This selects the HNS MDIO support. It is needed by HNS_DSAF to access
+ the PHY.
+
+config HNS
+ tristate
+ help
+ This selects the framework support for the Hisilicon Network Subsystem. It
+ is needed by any driver which provides an HNS acceleration engine or makes
+ use of the engine.
+
+config HNS_DSAF
+ tristate "Hisilicon HNS DSAF device Support"
+ select HNS
+ select HNS_MDIO
+ help
+ This selects the DSAF (Distributed System Area Fabric) network
+ acceleration engine support. The engine is used in Hisilicon hip05,
+ Hi1610 and later ICT SoCs.
+
+config HNS_ENET
+ tristate "Hisilicon HNS Ethernet Device Support"
+ select PHYLIB
+ select HNS
+ help
+ This selects the general Ethernet driver for HNS. This module makes
+ use of any HNS AE driver, such as HNS_DSAF.
+
+config HNS3
+ tristate "Hisilicon Network Subsystem Support HNS3 (Framework)"
+ depends on PCI
+ help
+ This selects the framework support for Hisilicon Network Subsystem 3.
+ This layer facilitates clients like ENET, RoCE and user-space Ethernet
+ drivers (like ODP) to register with HNAE devices and their associated
+ operations.
+
+if HNS3
+
+config HNS3_HCLGE
+ tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support"
+ default m
+ depends on PCI_MSI
+ help
+ This selects the HNS3_HCLGE network acceleration engine & its hardware
+ compatibility layer. The engine is used in the Hisilicon hip08 family of
+ SoCs and in upcoming SoCs.
+
+config HNS3_DCB
+ bool "Hisilicon HNS3 Data Center Bridge Support"
+ default n
+ depends on HNS3_HCLGE && DCB
+ help
+ Say Y here if you want to use Data Center Bridging (DCB) in the HNS3 driver.
+
+ If unsure, say N.
+
+config HNS3_HCLGEVF
+ tristate "Hisilicon HNS3VF Acceleration Engine & Compatibility Layer Support"
+ depends on PCI_MSI
+ depends on HNS3_HCLGE
+ help
+ This selects the HNS3 VF driver's network acceleration engine & its hardware
+ compatibility layer. The engine is used in the Hisilicon hip08 family of
+ SoCs and in upcoming SoCs.
+
+config HNS3_ENET
+ tristate "Hisilicon HNS3 Ethernet Device Support"
+ default m
+ depends on 64BIT && PCI
+ depends on INET
+ help
+ This selects the Ethernet driver for Hisilicon Network Subsystem 3 for the
+ hip08 family of SoCs. This module depends upon the HNAE3 driver to access
+ HNAE3 devices and their associated operations.
+
+endif #HNS3
+
+endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 000000000..7f76d4120
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
+obj-$(CONFIG_HIP04_ETH) += hip04_eth.o
+obj-$(CONFIG_HNS_MDIO) += hns_mdio.o
+obj-$(CONFIG_HNS) += hns/
+obj-$(CONFIG_HNS3) += hns3/
+obj-$(CONFIG_HISI_FEMAC) += hisi_femac.o
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
new file mode 100644
index 000000000..e53512f68
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -0,0 +1,1056 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ */
+
+#include <linux/module.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/ktime.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+#define SC_PPE_RESET_DREQ 0x026C
+
+#define PPE_CFG_RX_ADDR 0x100
+#define PPE_CFG_POOL_GRP 0x300
+#define PPE_CFG_RX_BUF_SIZE 0x400
+#define PPE_CFG_RX_FIFO_SIZE 0x500
+#define PPE_CURR_BUF_CNT 0xa200
+
+#define GE_DUPLEX_TYPE 0x08
+#define GE_MAX_FRM_SIZE_REG 0x3c
+#define GE_PORT_MODE 0x40
+#define GE_PORT_EN 0x44
+#define GE_SHORT_RUNTS_THR_REG 0x50
+#define GE_TX_LOCAL_PAGE_REG 0x5c
+#define GE_TRANSMIT_CONTROL_REG 0x60
+#define GE_CF_CRC_STRIP_REG 0x1b0
+#define GE_MODE_CHANGE_REG 0x1b4
+#define GE_RECV_CONTROL_REG 0x1e0
+#define GE_STATION_MAC_ADDRESS 0x210
+
+#define PPE_CFG_BUS_CTRL_REG 0x424
+#define PPE_CFG_RX_CTRL_REG 0x428
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_CPU_ADD_ADDR 0x6D0
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x500
+#define PPE_CFG_RX_PKT_MODE_REG 0x504
+#define PPE_CFG_QOS_VMID_GEN 0x520
+#define PPE_CFG_RX_PKT_INT 0x740
+#define PPE_INTEN 0x700
+#define PPE_INTSTS 0x708
+#define PPE_RINT 0x704
+#define PPE_CFG_STS_MODE 0x880
+#else
+#define PPE_CFG_CPU_ADD_ADDR 0x580
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x408
+#define PPE_CFG_RX_PKT_MODE_REG 0x438
+#define PPE_CFG_QOS_VMID_GEN 0x500
+#define PPE_CFG_RX_PKT_INT 0x538
+#define PPE_INTEN 0x600
+#define PPE_INTSTS 0x608
+#define PPE_RINT 0x604
+#define PPE_CFG_STS_MODE 0x700
+#endif /* CONFIG_HI13X1_GMAC */
+
+#define PPE_HIS_RX_PKT_CNT 0x804
+
+#define RESET_DREQ_ALL 0xffffffff
+
+/* REG_INTERRUPT */
+#define RCV_INT BIT(10)
+#define RCV_NOBUF BIT(8)
+#define RCV_DROP BIT(7)
+#define TX_DROP BIT(6)
+#define DEF_INT_ERR (RCV_NOBUF | RCV_DROP | TX_DROP)
+#define DEF_INT_MASK (RCV_INT | DEF_INT_ERR)
+
+/* TX descriptor config */
+#define TX_FREE_MEM BIT(0)
+#define TX_READ_ALLOC_L3 BIT(1)
+#if defined(CONFIG_HI13X1_GMAC)
+#define TX_CLEAR_WB BIT(7)
+#define TX_RELEASE_TO_PPE BIT(4)
+#define TX_FINISH_CACHE_INV BIT(6)
+#define TX_POOL_SHIFT 16
+#else
+#define TX_CLEAR_WB BIT(4)
+#define TX_FINISH_CACHE_INV BIT(2)
+#endif
+#define TX_L3_CHECKSUM BIT(5)
+#define TX_LOOP_BACK BIT(11)
+
+/* RX error */
+#define RX_PKT_DROP BIT(0)
+#define RX_L2_ERR BIT(1)
+#define RX_PKT_ERR (RX_PKT_DROP | RX_L2_ERR)
+
+#define SGMII_SPEED_1000 0x08
+#define SGMII_SPEED_100 0x07
+#define SGMII_SPEED_10 0x06
+#define MII_SPEED_100 0x01
+#define MII_SPEED_10 0x00
+
+#define GE_DUPLEX_FULL BIT(0)
+#define GE_DUPLEX_HALF 0x00
+#define GE_MODE_CHANGE_EN BIT(0)
+
+#define GE_TX_AUTO_NEG BIT(5)
+#define GE_TX_ADD_CRC BIT(6)
+#define GE_TX_SHORT_PAD_THROUGH BIT(7)
+
+#define GE_RX_STRIP_CRC BIT(0)
+#define GE_RX_STRIP_PAD BIT(3)
+#define GE_RX_PAD_EN BIT(4)
+
+#define GE_AUTO_NEG_CTL BIT(0)
+
+#define GE_RX_INT_THRESHOLD BIT(6)
+#define GE_RX_TIMEOUT 0x04
+
+#define GE_RX_PORT_EN BIT(1)
+#define GE_TX_PORT_EN BIT(2)
+
+#define PPE_CFG_RX_PKT_ALIGN BIT(18)
+
+#if defined(CONFIG_HI13X1_GMAC)
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 4
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 7
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(0)
+#define PPE_CFG_QOS_VMID_MODE BIT(15)
+#define PPE_CFG_BUS_LOCAL_REL (BIT(9) | BIT(15) | BIT(19) | BIT(23))
+
+/* buf unit size is cache_line_size, which is 64, so the shift is 6 */
+#define PPE_BUF_SIZE_SHIFT 6
+#define PPE_TX_BUF_HOLD BIT(31)
+#define SOC_CACHE_LINE_MASK 0x3F
+#else
+#define PPE_CFG_QOS_VMID_GRP_SHIFT 8
+#define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11
+#define PPE_CFG_STS_RX_PKT_CNT_RC BIT(12)
+#define PPE_CFG_QOS_VMID_MODE BIT(14)
+#define PPE_CFG_BUS_LOCAL_REL BIT(14)
+
+/* buf unit size is 1, so the shift is 0 */
+#define PPE_BUF_SIZE_SHIFT 0
+#define PPE_TX_BUF_HOLD 0
+#endif /* CONFIG_HI13X1_GMAC */
+
+#define PPE_CFG_RX_FIFO_FSFU BIT(11)
+#define PPE_CFG_RX_DEPTH_SHIFT 16
+#define PPE_CFG_RX_START_SHIFT 0
+
+#define PPE_CFG_BUS_BIG_ENDIEN BIT(0)
+
+#define RX_DESC_NUM 128
+#define TX_DESC_NUM 256
+#define TX_NEXT(N) (((N) + 1) & (TX_DESC_NUM-1))
+#define RX_NEXT(N) (((N) + 1) & (RX_DESC_NUM-1))
+
+#define GMAC_PPE_RX_PKT_MAX_LEN 379
+#define GMAC_MAX_PKT_LEN 1516
+#define GMAC_MIN_PKT_LEN 31
+#define RX_BUF_SIZE 1600
+#define RESET_TIMEOUT 1000
+#define TX_TIMEOUT (6 * HZ)
+
+#define DRV_NAME "hip04-ether"
+#define DRV_VERSION "v1.0"
+
+#define HIP04_MAX_TX_COALESCE_USECS 200
+#define HIP04_MIN_TX_COALESCE_USECS 100
+#define HIP04_MAX_TX_COALESCE_FRAMES 200
+#define HIP04_MIN_TX_COALESCE_FRAMES 100
+
+struct tx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[2];
+ u32 send_addr;
+ u16 send_size;
+ u16 data_offset;
+ u32 reserved2[7];
+ u32 cfg;
+ u32 wb_addr;
+ u32 reserved3[3];
+#else
+ u32 send_addr;
+ u32 send_size;
+ u32 next_addr;
+ u32 cfg;
+ u32 wb_addr;
+#endif
+} __aligned(64);
+
+struct rx_desc {
+#if defined(CONFIG_HI13X1_GMAC)
+ u32 reserved1[3];
+ u16 pkt_len;
+ u16 reserved_16;
+ u32 reserved2[6];
+ u32 pkt_err;
+ u32 reserved3[5];
+#else
+ u16 reserved_16;
+ u16 pkt_len;
+ u32 reserve1[3];
+ u32 pkt_err;
+ u32 reserve2[4];
+#endif
+};
+
+struct hip04_priv {
+ void __iomem *base;
+#if defined(CONFIG_HI13X1_GMAC)
+ void __iomem *sysctrl_base;
+#endif
+ phy_interface_t phy_mode;
+ int chan;
+ unsigned int port;
+ unsigned int group;
+ unsigned int speed;
+ unsigned int duplex;
+ unsigned int reg_inten;
+
+ struct napi_struct napi;
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct tx_desc *tx_desc;
+ dma_addr_t tx_desc_dma;
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ dma_addr_t tx_phys[TX_DESC_NUM];
+ unsigned int tx_head;
+
+ int tx_coalesce_frames;
+ int tx_coalesce_usecs;
+ struct hrtimer tx_coalesce_timer;
+
+ unsigned char *rx_buf[RX_DESC_NUM];
+ dma_addr_t rx_phys[RX_DESC_NUM];
+ unsigned int rx_head;
+ unsigned int rx_buf_size;
+ unsigned int rx_cnt_remaining;
+
+ struct device_node *phy_node;
+ struct phy_device *phy;
+ struct regmap *map;
+ struct work_struct tx_timeout_task;
+
+ /* written only by tx cleanup */
+ unsigned int tx_tail ____cacheline_aligned_in_smp;
+};
+
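+/* Number of TX descriptors currently in flight between the producer index
+ * (tx_head) and the consumer index (tx_tail).  TX_DESC_NUM is a power of
+ * two, so the modulo also handles index wrap-around.
+ */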
+static inline unsigned int tx_count(unsigned int head, unsigned int tail)
+{
+ return (head - tail) % TX_DESC_NUM;
+}
+
+static void hip04_config_port(struct net_device *ndev, u32 speed, u32 duplex)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_SGMII:
+ if (speed == SPEED_1000)
+ val = SGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = SGMII_SPEED_100;
+ else
+ val = SGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(ndev, "not supported mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+ writel_relaxed(val, priv->base + GE_PORT_MODE);
+
+ val = duplex ? GE_DUPLEX_FULL : GE_DUPLEX_HALF;
+ writel_relaxed(val, priv->base + GE_DUPLEX_TYPE);
+
+ val = GE_MODE_CHANGE_EN;
+ writel_relaxed(val, priv->base + GE_MODE_CHANGE_REG);
+}
+
+static void hip04_reset_dreq(struct hip04_priv *priv)
+{
+#if defined(CONFIG_HI13X1_GMAC)
+ writel_relaxed(RESET_DREQ_ALL, priv->sysctrl_base + SC_PPE_RESET_DREQ);
+#endif
+}
+
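+/* Drain the PPE receive buffer pool for this port: keep polling the current
+ * buffer count (reading the RX address register presumably dequeues one
+ * buffer per iteration) until the count reaches zero or RESET_TIMEOUT
+ * iterations have elapsed.
+ */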
+static void hip04_reset_ppe(struct hip04_priv *priv)
+{
+ u32 val, tmp, timeout = 0;
+
+ do {
+ regmap_read(priv->map, priv->port * 4 + PPE_CURR_BUF_CNT, &val);
+ regmap_read(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, &tmp);
+ if (timeout++ > RESET_TIMEOUT)
+ break;
+ } while (val & 0xfff);
+}
+
+static void hip04_config_fifo(struct hip04_priv *priv)
+{
+ u32 val;
+
+ val = readl_relaxed(priv->base + PPE_CFG_STS_MODE);
+ val |= PPE_CFG_STS_RX_PKT_CNT_RC;
+ writel_relaxed(val, priv->base + PPE_CFG_STS_MODE);
+
+ val = BIT(priv->group);
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_POOL_GRP, val);
+
+ val = priv->group << PPE_CFG_QOS_VMID_GRP_SHIFT;
+ val |= PPE_CFG_QOS_VMID_MODE;
+ writel_relaxed(val, priv->base + PPE_CFG_QOS_VMID_GEN);
+
+ val = RX_BUF_SIZE >> PPE_BUF_SIZE_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_BUF_SIZE, val);
+
+ val = RX_DESC_NUM << PPE_CFG_RX_DEPTH_SHIFT;
+ val |= PPE_CFG_RX_FIFO_FSFU;
+ val |= priv->chan << PPE_CFG_RX_START_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_FIFO_SIZE, val);
+
+ val = NET_IP_ALIGN << PPE_CFG_RX_CTRL_ALIGN_SHIFT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_CTRL_REG);
+
+ val = PPE_CFG_RX_PKT_ALIGN;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_MODE_REG);
+
+ val = PPE_CFG_BUS_LOCAL_REL | PPE_CFG_BUS_BIG_ENDIEN;
+ writel_relaxed(val, priv->base + PPE_CFG_BUS_CTRL_REG);
+
+ val = GMAC_PPE_RX_PKT_MAX_LEN;
+ writel_relaxed(val, priv->base + PPE_CFG_MAX_FRAME_LEN_REG);
+
+ val = GMAC_MAX_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_MAX_FRM_SIZE_REG);
+
+ val = GMAC_MIN_PKT_LEN;
+ writel_relaxed(val, priv->base + GE_SHORT_RUNTS_THR_REG);
+
+ val = readl_relaxed(priv->base + GE_TRANSMIT_CONTROL_REG);
+ val |= GE_TX_AUTO_NEG | GE_TX_ADD_CRC | GE_TX_SHORT_PAD_THROUGH;
+ writel_relaxed(val, priv->base + GE_TRANSMIT_CONTROL_REG);
+
+ val = GE_RX_STRIP_CRC;
+ writel_relaxed(val, priv->base + GE_CF_CRC_STRIP_REG);
+
+ val = readl_relaxed(priv->base + GE_RECV_CONTROL_REG);
+ val |= GE_RX_STRIP_PAD | GE_RX_PAD_EN;
+ writel_relaxed(val, priv->base + GE_RECV_CONTROL_REG);
+
+#ifndef CONFIG_HI13X1_GMAC
+ val = GE_AUTO_NEG_CTL;
+ writel_relaxed(val, priv->base + GE_TX_LOCAL_PAGE_REG);
+#endif
+}
+
+static void hip04_mac_enable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* enable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val |= GE_RX_PORT_EN | GE_TX_PORT_EN;
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+
+ /* clear rx int */
+ val = RCV_INT;
+ writel_relaxed(val, priv->base + PPE_RINT);
+
+ /* config recv int */
+ val = GE_RX_INT_THRESHOLD | GE_RX_TIMEOUT;
+ writel_relaxed(val, priv->base + PPE_CFG_RX_PKT_INT);
+
+ /* enable interrupt */
+ priv->reg_inten = DEF_INT_MASK;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+}
+
+static void hip04_mac_disable(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ u32 val;
+
+ /* disable int */
+ priv->reg_inten &= ~(DEF_INT_MASK);
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+
+ /* disable tx & rx */
+ val = readl_relaxed(priv->base + GE_PORT_EN);
+ val &= ~(GE_RX_PORT_EN | GE_TX_PORT_EN);
+ writel_relaxed(val, priv->base + GE_PORT_EN);
+}
+
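+/* Hand a TX descriptor to the hardware by writing its DMA address into the
+ * CPU add-address register.  On HI13X1 the address is expressed in
+ * cache-line units and the buffer-hold flag is set as well.
+ */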
+static void hip04_set_xmit_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT | PPE_TX_BUF_HOLD;
+ writel(val, priv->base + PPE_CFG_CPU_ADD_ADDR);
+}
+
+static void hip04_set_recv_desc(struct hip04_priv *priv, dma_addr_t phys)
+{
+ u32 val;
+
+ val = phys >> PPE_BUF_SIZE_SHIFT;
+ regmap_write(priv->map, priv->port * 4 + PPE_CFG_RX_ADDR, val);
+}
+
+static u32 hip04_recv_cnt(struct hip04_priv *priv)
+{
+ return readl(priv->base + PPE_HIS_RX_PKT_CNT);
+}
+
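+/* The station MAC address is split across two registers: the first holds
+ * the top two address bytes, the second the remaining four.
+ */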
+static void hip04_update_mac_address(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ writel_relaxed(((ndev->dev_addr[0] << 8) | (ndev->dev_addr[1])),
+ priv->base + GE_STATION_MAC_ADDRESS);
+ writel_relaxed(((ndev->dev_addr[2] << 24) | (ndev->dev_addr[3] << 16) |
+ (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5])),
+ priv->base + GE_STATION_MAC_ADDRESS + 4);
+}
+
+static int hip04_set_mac_address(struct net_device *ndev, void *addr)
+{
+ eth_mac_addr(ndev, addr);
+ hip04_update_mac_address(ndev);
+ return 0;
+}
+
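+/* Reclaim completed TX descriptors starting at tx_tail.  Each descriptor's
+ * wb_addr points back at its own send_addr field, so the hardware write-back
+ * is expected to clear send_addr once transmission finishes; a non-zero
+ * send_addr therefore means the descriptor is still in flight unless a
+ * forced reclaim is requested.
+ */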
+static int hip04_tx_reclaim(struct net_device *ndev, bool force)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ unsigned tx_tail = priv->tx_tail;
+ struct tx_desc *desc;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int count;
+
+ smp_rmb();
+ count = tx_count(READ_ONCE(priv->tx_head), tx_tail);
+ if (count == 0)
+ goto out;
+
+ while (count) {
+ desc = &priv->tx_desc[tx_tail];
+ if (desc->send_addr != 0) {
+ if (force)
+ desc->send_addr = 0;
+ else
+ break;
+ }
+
+ if (priv->tx_phys[tx_tail]) {
+ dma_unmap_single(priv->dev, priv->tx_phys[tx_tail],
+ priv->tx_skb[tx_tail]->len,
+ DMA_TO_DEVICE);
+ priv->tx_phys[tx_tail] = 0;
+ }
+ pkts_compl++;
+ bytes_compl += priv->tx_skb[tx_tail]->len;
+ dev_kfree_skb(priv->tx_skb[tx_tail]);
+ priv->tx_skb[tx_tail] = NULL;
+ tx_tail = TX_NEXT(tx_tail);
+ count--;
+ }
+
+ priv->tx_tail = tx_tail;
+ smp_wmb(); /* Ensure tx_tail visible to xmit */
+
+out:
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(ndev)) && (count < (TX_DESC_NUM - 1)))
+ netif_wake_queue(ndev);
+
+ return count;
+}
+
+static void hip04_start_tx_timer(struct hip04_priv *priv)
+{
+ unsigned long ns = priv->tx_coalesce_usecs * NSEC_PER_USEC / 2;
+
+ /* allow timer to fire after half the time at the earliest */
+ hrtimer_start_range_ns(&priv->tx_coalesce_timer, ns_to_ktime(ns),
+ ns, HRTIMER_MODE_REL);
+}
+
+static netdev_tx_t
+hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ unsigned int tx_head = priv->tx_head, count;
+ struct tx_desc *desc = &priv->tx_desc[tx_head];
+ dma_addr_t phys;
+
+ smp_rmb();
+ count = tx_count(tx_head, READ_ONCE(priv->tx_tail));
+ if (count == (TX_DESC_NUM - 1)) {
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+ }
+
+ phys = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, phys)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ priv->tx_skb[tx_head] = skb;
+ priv->tx_phys[tx_head] = phys;
+
+ desc->send_size = (__force u32)cpu_to_be32(skb->len);
+#if defined(CONFIG_HI13X1_GMAC)
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV
+ | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT);
+ desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK);
+ desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK);
+#else
+ desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV);
+ desc->send_addr = (__force u32)cpu_to_be32(phys);
+#endif
+ phys = priv->tx_desc_dma + tx_head * sizeof(struct tx_desc);
+ desc->wb_addr = (__force u32)cpu_to_be32(phys +
+ offsetof(struct tx_desc, send_addr));
+ skb_tx_timestamp(skb);
+
+ hip04_set_xmit_desc(priv, phys);
+ count++;
+ netdev_sent_queue(ndev, skb->len);
+ priv->tx_head = TX_NEXT(tx_head);
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+
+ /* Ensure tx_head update visible to tx reclaim */
+ smp_wmb();
+
+ /* queue is getting full, better start cleaning up now */
+ if (count >= priv->tx_coalesce_frames) {
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt and timer */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT,
+ priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+ } else if (!hrtimer_is_queued(&priv->tx_coalesce_timer)) {
+ /* cleanup not pending yet, start a new timer */
+ hip04_start_tx_timer(priv);
+ }
+
+ return NETDEV_TX_OK;
+}
+
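+/* NAPI poll: reclaim finished TX descriptors first, then consume received
+ * frames.  The RX packet counter appears to be read-to-clear (the _CNT_RC
+ * bit is set in hip04_config_fifo), so the running total is accumulated in
+ * rx_cnt_remaining; a descriptor with a zero length marks the end of the
+ * frames currently available.
+ */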
+static int hip04_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct hip04_priv *priv = container_of(napi, struct hip04_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct net_device_stats *stats = &ndev->stats;
+ struct rx_desc *desc;
+ struct sk_buff *skb;
+ unsigned char *buf;
+ bool last = false;
+ dma_addr_t phys;
+ int rx = 0;
+ int tx_remaining;
+ u16 len;
+ u32 err;
+
+ /* clean up tx descriptors */
+ tx_remaining = hip04_tx_reclaim(ndev, false);
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ while (priv->rx_cnt_remaining && !last) {
+ buf = priv->rx_buf[priv->rx_head];
+ skb = build_skb(buf, priv->rx_buf_size);
+ if (unlikely(!skb)) {
+ net_dbg_ratelimited("build_skb failed\n");
+ goto refill;
+ }
+
+ dma_unmap_single(priv->dev, priv->rx_phys[priv->rx_head],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[priv->rx_head] = 0;
+
+ desc = (struct rx_desc *)skb->data;
+ len = be16_to_cpu((__force __be16)desc->pkt_len);
+ err = be32_to_cpu((__force __be32)desc->pkt_err);
+
+ if (0 == len) {
+ dev_kfree_skb_any(skb);
+ last = true;
+ } else if ((err & RX_PKT_ERR) || (len >= GMAC_MAX_PKT_LEN)) {
+ dev_kfree_skb_any(skb);
+ stats->rx_dropped++;
+ stats->rx_errors++;
+ } else {
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&priv->napi, skb);
+ stats->rx_packets++;
+ stats->rx_bytes += len;
+ rx++;
+ }
+
+refill:
+ buf = netdev_alloc_frag(priv->rx_buf_size);
+ if (!buf)
+ goto done;
+ phys = dma_map_single(priv->dev, buf,
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, phys))
+ goto done;
+ priv->rx_buf[priv->rx_head] = buf;
+ priv->rx_phys[priv->rx_head] = phys;
+ hip04_set_recv_desc(priv, phys);
+
+ priv->rx_head = RX_NEXT(priv->rx_head);
+ if (rx >= budget) {
+ --priv->rx_cnt_remaining;
+ goto done;
+ }
+
+ if (--priv->rx_cnt_remaining == 0)
+ priv->rx_cnt_remaining += hip04_recv_cnt(priv);
+ }
+
+ if (!(priv->reg_inten & RCV_INT)) {
+ /* enable rx interrupt */
+ priv->reg_inten |= RCV_INT;
+ writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
+ }
+ napi_complete_done(napi, rx);
+done:
+ /* start a new timer if necessary */
+ if (rx < budget && tx_remaining)
+ hip04_start_tx_timer(priv);
+
+ return rx;
+}
+
+static irqreturn_t hip04_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &ndev->stats;
+ u32 ists = readl_relaxed(priv->base + PPE_INTSTS);
+
+ if (!ists)
+ return IRQ_NONE;
+
+ writel_relaxed(DEF_INT_MASK, priv->base + PPE_RINT);
+
+ if (unlikely(ists & DEF_INT_ERR)) {
+ if (ists & (RCV_NOBUF | RCV_DROP)) {
+ stats->rx_errors++;
+ stats->rx_dropped++;
+ netdev_err(ndev, "rx drop\n");
+ }
+ if (ists & TX_DROP) {
+ stats->tx_dropped++;
+ netdev_err(ndev, "tx drop\n");
+ }
+ }
+
+ if (ists & RCV_INT && napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ hrtimer_cancel(&priv->tx_coalesce_timer);
+ __napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static enum hrtimer_restart tx_done(struct hrtimer *hrtimer)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(hrtimer, struct hip04_priv, tx_coalesce_timer);
+
+ if (napi_schedule_prep(&priv->napi)) {
+ /* disable rx interrupt */
+ priv->reg_inten &= ~(RCV_INT);
+ writel_relaxed(DEF_INT_MASK & ~RCV_INT, priv->base + PPE_INTEN);
+ __napi_schedule(&priv->napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
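+/* phylib link-change callback: reprogram the MAC speed/duplex registers
+ * only when they differ from the values cached in hip04_priv.
+ */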
+static void hip04_adjust_link(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hip04_config_port(ndev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static int hip04_mac_open(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->rx_head = 0;
+ priv->rx_cnt_remaining = 0;
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ hip04_reset_ppe(priv);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ dma_addr_t phys;
+
+ phys = dma_map_single(priv->dev, priv->rx_buf[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, phys))
+ return -EIO;
+
+ priv->rx_phys[i] = phys;
+ hip04_set_recv_desc(priv, phys);
+ }
+
+ if (priv->phy)
+ phy_start(priv->phy);
+
+ netdev_reset_queue(ndev);
+ netif_start_queue(ndev);
+ hip04_mac_enable(ndev);
+ napi_enable(&priv->napi);
+
+ return 0;
+}
+
+static int hip04_mac_stop(struct net_device *ndev)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ napi_disable(&priv->napi);
+ netif_stop_queue(ndev);
+ hip04_mac_disable(ndev);
+ hip04_tx_reclaim(ndev, true);
+ hip04_reset_ppe(priv);
+
+ if (priv->phy)
+ phy_stop(priv->phy);
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ if (priv->rx_phys[i]) {
+ dma_unmap_single(priv->dev, priv->rx_phys[i],
+ RX_BUF_SIZE, DMA_FROM_DEVICE);
+ priv->rx_phys[i] = 0;
+ }
+ }
+
+ return 0;
+}
+
+static void hip04_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static void hip04_tx_timeout_task(struct work_struct *work)
+{
+ struct hip04_priv *priv;
+
+ priv = container_of(work, struct hip04_priv, tx_timeout_task);
+ hip04_mac_stop(priv->ndev);
+ hip04_mac_open(priv->ndev);
+}
+
+static int hip04_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ ec->tx_coalesce_usecs = priv->tx_coalesce_usecs;
+ ec->tx_max_coalesced_frames = priv->tx_coalesce_frames;
+
+ return 0;
+}
+
+static int hip04_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct hip04_priv *priv = netdev_priv(netdev);
+
+ if ((ec->tx_coalesce_usecs > HIP04_MAX_TX_COALESCE_USECS ||
+ ec->tx_coalesce_usecs < HIP04_MIN_TX_COALESCE_USECS) ||
+ (ec->tx_max_coalesced_frames > HIP04_MAX_TX_COALESCE_FRAMES ||
+ ec->tx_max_coalesced_frames < HIP04_MIN_TX_COALESCE_FRAMES))
+ return -EINVAL;
+
+ priv->tx_coalesce_usecs = ec->tx_coalesce_usecs;
+ priv->tx_coalesce_frames = ec->tx_max_coalesced_frames;
+
+ return 0;
+}
+
+static void hip04_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
+}
+
+static const struct ethtool_ops hip04_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_USECS |
+ ETHTOOL_COALESCE_TX_MAX_FRAMES,
+ .get_coalesce = hip04_get_coalesce,
+ .set_coalesce = hip04_set_coalesce,
+ .get_drvinfo = hip04_get_drvinfo,
+};
+
+static const struct net_device_ops hip04_netdev_ops = {
+ .ndo_open = hip04_mac_open,
+ .ndo_stop = hip04_mac_stop,
+ .ndo_start_xmit = hip04_mac_start_xmit,
+ .ndo_set_mac_address = hip04_set_mac_address,
+ .ndo_tx_timeout = hip04_timeout,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
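+/* Allocate the rings: the TX descriptors come from one coherent DMA block,
+ * while RX buffers are page-fragment allocations sized to leave room for
+ * struct skb_shared_info so the RX path can wrap them with build_skb().
+ */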
+static int hip04_alloc_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ priv->tx_desc = dma_alloc_coherent(d,
+ TX_DESC_NUM * sizeof(struct tx_desc),
+ &priv->tx_desc_dma, GFP_KERNEL);
+ if (!priv->tx_desc)
+ return -ENOMEM;
+
+ priv->rx_buf_size = RX_BUF_SIZE +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ priv->rx_buf[i] = netdev_alloc_frag(priv->rx_buf_size);
+ if (!priv->rx_buf[i])
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hip04_free_ring(struct net_device *ndev, struct device *d)
+{
+ struct hip04_priv *priv = netdev_priv(ndev);
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++)
+ if (priv->rx_buf[i])
+ skb_free_frag(priv->rx_buf[i]);
+
+ for (i = 0; i < TX_DESC_NUM; i++)
+ if (priv->tx_skb[i])
+ dev_kfree_skb_any(priv->tx_skb[i]);
+
+ dma_free_coherent(d, TX_DESC_NUM * sizeof(struct tx_desc),
+ priv->tx_desc, priv->tx_desc_dma);
+}
+
+static int hip04_mac_probe(struct platform_device *pdev)
+{
+ struct device *d = &pdev->dev;
+ struct device_node *node = d->of_node;
+ struct of_phandle_args arg;
+ struct net_device *ndev;
+ struct hip04_priv *priv;
+ int irq;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hip04_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->dev = d;
+ priv->ndev = ndev;
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto init_fail;
+ }
+
+#if defined(CONFIG_HI13X1_GMAC)
+ priv->sysctrl_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->sysctrl_base)) {
+ ret = PTR_ERR(priv->sysctrl_base);
+ goto init_fail;
+ }
+#endif
+
+ ret = of_parse_phandle_with_fixed_args(node, "port-handle", 3, 0, &arg);
+ if (ret < 0) {
+ dev_warn(d, "no port-handle\n");
+ goto init_fail;
+ }
+
+ priv->port = arg.args[0];
+ priv->chan = arg.args[1] * RX_DESC_NUM;
+ priv->group = arg.args[2];
+
+ hrtimer_init(&priv->tx_coalesce_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ /* BQL will try to keep the TX queue as short as possible, but it can't
+ * be faster than tx_coalesce_usecs, so we need a fast timeout here,
+ * but also long enough to gather up enough frames to ensure we don't
+ * get more interrupts than necessary.
+ * 200us is enough for 16 frames of 1500 bytes at gigabit ethernet rate
+ */
+ priv->tx_coalesce_frames = TX_DESC_NUM * 3 / 4;
+ priv->tx_coalesce_usecs = 200;
+ priv->tx_coalesce_timer.function = tx_done;
+
+ priv->map = syscon_node_to_regmap(arg.np);
+ if (IS_ERR(priv->map)) {
+ dev_warn(d, "no syscon hisilicon,hip04-ppe\n");
+ ret = PTR_ERR(priv->map);
+ goto init_fail;
+ }
+
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
+ dev_warn(d, "not find phy-mode\n");
+ goto init_fail;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ ret = -EINVAL;
+ goto init_fail;
+ }
+
+ ret = devm_request_irq(d, irq, hip04_mac_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto init_fail;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (priv->phy_node) {
+ priv->phy = of_phy_connect(ndev, priv->phy_node,
+ &hip04_adjust_link,
+ 0, priv->phy_mode);
+ if (!priv->phy) {
+ ret = -EPROBE_DEFER;
+ goto init_fail;
+ }
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hip04_tx_timeout_task);
+
+ ndev->netdev_ops = &hip04_netdev_ops;
+ ndev->ethtool_ops = &hip04_ethtool_ops;
+ ndev->watchdog_timeo = TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->irq = irq;
+ netif_napi_add(ndev, &priv->napi, hip04_rx_poll, NAPI_POLL_WEIGHT);
+
+ hip04_reset_dreq(priv);
+ hip04_reset_ppe(priv);
+ if (priv->phy_mode == PHY_INTERFACE_MODE_MII)
+ hip04_config_port(ndev, SPEED_100, DUPLEX_FULL);
+
+ hip04_config_fifo(priv);
+ eth_random_addr(ndev->dev_addr);
+ hip04_update_mac_address(ndev);
+
+ ret = hip04_alloc_ring(ndev, d);
+ if (ret) {
+ netdev_err(ndev, "alloc ring fail\n");
+ goto alloc_fail;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret)
+ goto alloc_fail;
+
+ return 0;
+
+alloc_fail:
+ hip04_free_ring(ndev, d);
+init_fail:
+ of_node_put(priv->phy_node);
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hip04_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hip04_priv *priv = netdev_priv(ndev);
+ struct device *d = &pdev->dev;
+
+ if (priv->phy)
+ phy_disconnect(priv->phy);
+
+ hip04_free_ring(ndev, d);
+ unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hip04_mac_match[] = {
+ { .compatible = "hisilicon,hip04-mac" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, hip04_mac_match);
+
+static struct platform_driver hip04_mac_driver = {
+ .probe = hip04_mac_probe,
+ .remove = hip04_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = hip04_mac_match,
+ },
+};
+module_platform_driver(hip04_mac_driver);
+
+MODULE_DESCRIPTION("HISILICON P04 Ethernet driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
new file mode 100644
index 000000000..c16dfd869
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -0,0 +1,978 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Hisilicon Fast Ethernet MAC Driver
+ *
+ * Copyright (c) 2016 HiSilicon Technologies Co., Ltd.
+ */
+
+#include <linux/circ_buf.h>
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+/* MAC control register list */
+#define MAC_PORTSEL 0x0200
+#define MAC_PORTSEL_STAT_CPU BIT(0)
+#define MAC_PORTSEL_RMII BIT(1)
+#define MAC_PORTSET 0x0208
+#define MAC_PORTSET_DUPLEX_FULL BIT(0)
+#define MAC_PORTSET_LINKED BIT(1)
+#define MAC_PORTSET_SPEED_100M BIT(2)
+#define MAC_SET 0x0210
+#define MAX_FRAME_SIZE 1600
+#define MAX_FRAME_SIZE_MASK GENMASK(10, 0)
+#define BIT_PAUSE_EN BIT(18)
+#define RX_COALESCE_SET 0x0340
+#define RX_COALESCED_FRAME_OFFSET 24
+#define RX_COALESCED_FRAMES 8
+#define RX_COALESCED_TIMER 0x74
+#define QLEN_SET 0x0344
+#define RX_DEPTH_OFFSET 8
+#define MAX_HW_FIFO_DEPTH 64
+#define HW_TX_FIFO_DEPTH 12
+#define HW_RX_FIFO_DEPTH (MAX_HW_FIFO_DEPTH - HW_TX_FIFO_DEPTH)
+#define IQFRM_DES 0x0354
+#define RX_FRAME_LEN_MASK GENMASK(11, 0)
+#define IQ_ADDR 0x0358
+#define EQ_ADDR 0x0360
+#define EQFRM_LEN 0x0364
+#define ADDRQ_STAT 0x036C
+#define TX_CNT_INUSE_MASK GENMASK(5, 0)
+#define BIT_TX_READY BIT(24)
+#define BIT_RX_READY BIT(25)
+/* global control register list */
+#define GLB_HOSTMAC_L32 0x0000
+#define GLB_HOSTMAC_H16 0x0004
+#define GLB_SOFT_RESET 0x0008
+#define SOFT_RESET_ALL BIT(0)
+#define GLB_FWCTRL 0x0010
+#define FWCTRL_VLAN_ENABLE BIT(0)
+#define FWCTRL_FW2CPU_ENA BIT(5)
+#define FWCTRL_FWALL2CPU BIT(7)
+#define GLB_MACTCTRL 0x0014
+#define MACTCTRL_UNI2CPU BIT(1)
+#define MACTCTRL_MULTI2CPU BIT(3)
+#define MACTCTRL_BROAD2CPU BIT(5)
+#define MACTCTRL_MACT_ENA BIT(7)
+#define GLB_IRQ_STAT 0x0030
+#define GLB_IRQ_ENA 0x0034
+#define IRQ_ENA_PORT0_MASK GENMASK(7, 0)
+#define IRQ_ENA_PORT0 BIT(18)
+#define IRQ_ENA_ALL BIT(19)
+#define GLB_IRQ_RAW 0x0038
+#define IRQ_INT_RX_RDY BIT(0)
+#define IRQ_INT_TX_PER_PACKET BIT(1)
+#define IRQ_INT_TX_FIFO_EMPTY BIT(6)
+#define IRQ_INT_MULTI_RXRDY BIT(7)
+#define DEF_INT_MASK (IRQ_INT_MULTI_RXRDY | \
+ IRQ_INT_TX_PER_PACKET | \
+ IRQ_INT_TX_FIFO_EMPTY)
+#define GLB_MAC_L32_BASE 0x0100
+#define GLB_MAC_H16_BASE 0x0104
+#define MACFLT_HI16_MASK GENMASK(15, 0)
+#define BIT_MACFLT_ENA BIT(17)
+#define BIT_MACFLT_FW2CPU BIT(21)
+#define GLB_MAC_H16(reg) (GLB_MAC_H16_BASE + ((reg) * 0x8))
+#define GLB_MAC_L32(reg) (GLB_MAC_L32_BASE + ((reg) * 0x8))
+#define MAX_MAC_FILTER_NUM 8
+#define MAX_UNICAST_ADDRESSES 2
+#define MAX_MULTICAST_ADDRESSES (MAX_MAC_FILTER_NUM - \
+ MAX_UNICAST_ADDRESSES)
+/* software tx and rx queue number, should be power of 2 */
+#define TXQ_NUM 64
+#define RXQ_NUM 128
+#define FEMAC_POLL_WEIGHT 16
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
+struct hisi_femac_queue {
+ struct sk_buff **skb;
+ dma_addr_t *dma_phys;
+ int num;
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct hisi_femac_priv {
+ void __iomem *port_base;
+ void __iomem *glb_base;
+ struct clk *clk;
+ struct reset_control *mac_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
+ u32 link_status;
+
+ struct device *dev;
+ struct net_device *ndev;
+
+ struct hisi_femac_queue txq;
+ struct hisi_femac_queue rxq;
+ u32 tx_fifo_used_cnt;
+ struct napi_struct napi;
+};
+
+static void hisi_femac_irq_enable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val | irqs, priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_irq_disable(struct hisi_femac_priv *priv, int irqs)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_IRQ_ENA);
+ writel(val & (~irqs), priv->glb_base + GLB_IRQ_ENA);
+}
+
+static void hisi_femac_tx_dma_unmap(struct hisi_femac_priv *priv,
+ struct sk_buff *skb, unsigned int pos)
+{
+ dma_addr_t dma_addr;
+
+ dma_addr = priv->txq.dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, skb->len, DMA_TO_DEVICE);
+}
+
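+/* Reclaim transmitted skbs: ADDRQ_STAT reports how many entries the
+ * hardware TX FIFO still holds, so anything beyond that count (up to
+ * tx_fifo_used_cnt) has been sent and can be unmapped and freed.
+ */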
+static void hisi_femac_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 val;
+
+ netif_tx_lock(dev);
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ while (val < priv->tx_fifo_used_cnt) {
+ skb = txq->skb[txq->tail];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "xmitq_cnt_inuse=%d, tx_fifo_used=%d\n",
+ val, priv->tx_fifo_used_cnt);
+ break;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, txq->tail);
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_kfree_skb_any(skb);
+
+ priv->tx_fifo_used_cnt--;
+
+ val = readl(priv->port_base + ADDRQ_STAT) & TX_CNT_INUSE_MASK;
+ txq->skb[txq->tail] = NULL;
+ txq->tail = (txq->tail + 1) % txq->num;
+ }
+
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(dev)) && pkts_compl)
+ netif_wake_queue(dev);
+
+ netif_tx_unlock(dev);
+}
+
+static void hisi_femac_adjust_link(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+ u32 status = 0;
+
+ if (phy->link)
+ status |= MAC_PORTSET_LINKED;
+ if (phy->duplex == DUPLEX_FULL)
+ status |= MAC_PORTSET_DUPLEX_FULL;
+ if (phy->speed == SPEED_100)
+ status |= MAC_PORTSET_SPEED_100M;
+
+ if ((status != priv->link_status) &&
+ ((status | priv->link_status) & MAC_PORTSET_LINKED)) {
+ writel(status, priv->port_base + MAC_PORTSET);
+ priv->link_status = status;
+ phy_print_status(phy);
+ }
+}
+
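+/* Refill the hardware input queue: as long as the MAC reports BIT_RX_READY
+ * and the software ring has space, map a fresh skb and post its DMA address
+ * to the IQ_ADDR register.
+ */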
+static void hisi_femac_rx_refill(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ u32 pos;
+ u32 len = MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ pos = rxq->head;
+ while (readl(priv->port_base + ADDRQ_STAT) & BIT_RX_READY) {
+ if (!CIRC_SPACE(pos, rxq->tail, rxq->num))
+ break;
+ if (unlikely(rxq->skb[pos])) {
+ netdev_err(priv->ndev, "err skb[%d]=%p\n",
+ pos, rxq->skb[pos]);
+ break;
+ }
+ skb = netdev_alloc_skb_ip_align(priv->ndev, len);
+ if (unlikely(!skb))
+ break;
+
+ addr = dma_map_single(priv->dev, skb->data, len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rxq->dma_phys[pos] = addr;
+ rxq->skb[pos] = skb;
+ writel(addr, priv->port_base + IQ_ADDR);
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->head = pos;
+}
+
+static int hisi_femac_rx(struct net_device *dev, int limit)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t addr;
+ u32 rx_pkt_info, pos, len, rx_pkts_num = 0;
+
+ pos = rxq->tail;
+ while (readl(priv->glb_base + GLB_IRQ_RAW) & IRQ_INT_RX_RDY) {
+ rx_pkt_info = readl(priv->port_base + IQFRM_DES);
+ len = rx_pkt_info & RX_FRAME_LEN_MASK;
+ len -= ETH_FCS_LEN;
+
+ /* tell hardware we will deal with this packet */
+ writel(IRQ_INT_RX_RDY, priv->glb_base + GLB_IRQ_RAW);
+
+ rx_pkts_num++;
+
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "rx skb NULL. pos=%d\n", pos);
+ break;
+ }
+ rxq->skb[pos] = NULL;
+
+ addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ skb_put(skb, len);
+ if (unlikely(skb->len > MAX_FRAME_SIZE)) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+next:
+ pos = (pos + 1) % rxq->num;
+ if (rx_pkts_num >= limit)
+ break;
+ }
+ rxq->tail = pos;
+
+ hisi_femac_rx_refill(priv);
+
+ return rx_pkts_num;
+}
+
+static int hisi_femac_poll(struct napi_struct *napi, int budget)
+{
+ struct hisi_femac_priv *priv = container_of(napi,
+ struct hisi_femac_priv, napi);
+ struct net_device *dev = priv->ndev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hisi_femac_xmit_reclaim(dev);
+ num = hisi_femac_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if (work_done >= budget)
+ break;
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hisi_femac_irq_enable(priv, DEF_INT_MASK &
+ (~IRQ_INT_TX_PER_PACKET));
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hisi_femac_interrupt(int irq, void *dev_id)
+{
+ int ints;
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ ints = readl(priv->glb_base + GLB_IRQ_RAW);
+
+ if (likely(ints & DEF_INT_MASK)) {
+ writel(ints & DEF_INT_MASK,
+ priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, DEF_INT_MASK);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hisi_femac_init_queue(struct device *dev,
+ struct hisi_femac_queue *queue,
+ unsigned int num)
+{
+ queue->skb = devm_kcalloc(dev, num, sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!queue->skb)
+ return -ENOMEM;
+
+ queue->dma_phys = devm_kcalloc(dev, num, sizeof(dma_addr_t),
+ GFP_KERNEL);
+ if (!queue->dma_phys)
+ return -ENOMEM;
+
+ queue->num = num;
+ queue->head = 0;
+ queue->tail = 0;
+
+ return 0;
+}
+
+static int hisi_femac_init_tx_and_rx_queues(struct hisi_femac_priv *priv)
+{
+ int ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->txq, TXQ_NUM);
+ if (ret)
+ return ret;
+
+ ret = hisi_femac_init_queue(priv->dev, &priv->rxq, RXQ_NUM);
+ if (ret)
+ return ret;
+
+ priv->tx_fifo_used_cnt = 0;
+
+ return 0;
+}
+
+static void hisi_femac_free_skb_rings(struct hisi_femac_priv *priv)
+{
+ struct hisi_femac_queue *txq = &priv->txq;
+ struct hisi_femac_queue *rxq = &priv->rxq;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ u32 pos;
+
+ pos = rxq->tail;
+ while (pos != rxq->head) {
+ skb = rxq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL rx skb. pos=%d, head=%d\n",
+ pos, rxq->head);
+ continue;
+ }
+
+ dma_addr = rxq->dma_phys[pos];
+ dma_unmap_single(priv->dev, dma_addr, MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ dev_kfree_skb_any(skb);
+ rxq->skb[pos] = NULL;
+ pos = (pos + 1) % rxq->num;
+ }
+ rxq->tail = pos;
+
+ pos = txq->tail;
+ while (pos != txq->head) {
+ skb = txq->skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(priv->ndev, "NULL tx skb. pos=%d, head=%d\n",
+ pos, txq->head);
+ continue;
+ }
+ hisi_femac_tx_dma_unmap(priv, skb, pos);
+ dev_kfree_skb_any(skb);
+ txq->skb[pos] = NULL;
+ pos = (pos + 1) % txq->num;
+ }
+ txq->tail = pos;
+ priv->tx_fifo_used_cnt = 0;
+}
+
+static int hisi_femac_set_hw_mac_addr(struct hisi_femac_priv *priv,
+ unsigned char *mac)
+{
+ u32 reg;
+
+ reg = mac[1] | (mac[0] << 8);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_H16);
+
+ reg = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel(reg, priv->glb_base + GLB_HOSTMAC_L32);
+
+ return 0;
+}
+
+static int hisi_femac_port_reset(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_SOFT_RESET);
+ val |= SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ usleep_range(500, 800);
+
+ val &= ~SOFT_RESET_ALL;
+ writel(val, priv->glb_base + GLB_SOFT_RESET);
+
+ return 0;
+}
+
+static int hisi_femac_net_open(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_port_reset(priv);
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+ hisi_femac_rx_refill(priv);
+
+ netif_carrier_off(dev);
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ priv->link_status = 0;
+ if (dev->phydev)
+ phy_start(dev->phydev);
+
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);
+
+ return 0;
+}
+
+static int hisi_femac_net_close(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0);
+
+ if (dev->phydev)
+ phy_stop(dev->phydev);
+
+ netif_stop_queue(dev);
+ napi_disable(&priv->napi);
+
+ hisi_femac_free_skb_rings(priv);
+
+ return 0;
+}
+
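+/* Transmit path: the frame is queued to the hardware by writing its DMA
+ * address to EQ_ADDR and then the frame length (including FCS) to EQFRM_LEN,
+ * which presumably triggers the actual transmission.
+ */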
+static netdev_tx_t hisi_femac_net_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct hisi_femac_queue *txq = &priv->txq;
+ dma_addr_t addr;
+ u32 val;
+
+ val = readl(priv->port_base + ADDRQ_STAT);
+ val &= BIT_TX_READY;
+ if (!val) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (unlikely(!CIRC_SPACE(txq->head, txq->tail,
+ txq->num))) {
+ hisi_femac_irq_enable(priv, IRQ_INT_TX_PER_PACKET);
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data,
+ skb->len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ txq->dma_phys[txq->head] = addr;
+
+ txq->skb[txq->head] = skb;
+ txq->head = (txq->head + 1) % txq->num;
+
+ writel(addr, priv->port_base + EQ_ADDR);
+ writel(skb->len + ETH_FCS_LEN, priv->port_base + EQFRM_LEN);
+
+ priv->tx_fifo_used_cnt++;
+
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static int hisi_femac_set_mac_address(struct net_device *dev, void *p)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+ struct sockaddr *skaddr = p;
+
+ if (!is_valid_ether_addr(skaddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(dev->dev_addr, skaddr->sa_data, dev->addr_len);
+ dev->addr_assign_type &= ~NET_ADDR_RANDOM;
+
+ hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
+
+ return 0;
+}
+
+static void hisi_femac_enable_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned int reg_n, bool enable)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MAC_H16(reg_n));
+ if (enable)
+ val |= BIT_MACFLT_ENA;
+ else
+ val &= ~BIT_MACFLT_ENA;
+ writel(val, priv->glb_base + GLB_MAC_H16(reg_n));
+}
+
+static void hisi_femac_set_hw_addr_filter(struct hisi_femac_priv *priv,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ unsigned int high, low;
+ u32 val;
+
+ high = GLB_MAC_H16(reg_n);
+ low = GLB_MAC_L32(reg_n);
+
+ val = (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) | addr[5];
+ writel(val, priv->glb_base + low);
+
+ val = readl(priv->glb_base + high);
+ val &= ~MACFLT_HI16_MASK;
+ val |= ((addr[0] << 8) | addr[1]);
+ val |= (BIT_MACFLT_ENA | BIT_MACFLT_FW2CPU);
+ writel(val, priv->glb_base + high);
+}
+
+static void hisi_femac_set_promisc_mode(struct hisi_femac_priv *priv,
+ bool promisc_mode)
+{
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ if (promisc_mode)
+ val |= FWCTRL_FWALL2CPU;
+ else
+ val &= ~FWCTRL_FWALL2CPU;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+}
+
+/* Handle multiple multicast addresses (perfect filtering) */
+static void hisi_femac_set_mc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if ((netdev_mc_count(dev) > MAX_MULTICAST_ADDRESSES) ||
+ (dev->flags & IFF_ALLMULTI)) {
+ val |= MACTCTRL_MULTI2CPU;
+ } else {
+ int reg = MAX_UNICAST_ADDRESSES;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_MAC_FILTER_NUM; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_mc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_MULTI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+/* Handle multiple unicast addresses (perfect filtering) */
+static void hisi_femac_set_uc_addr_filter(struct hisi_femac_priv *priv)
+{
+ struct net_device *dev = priv->ndev;
+ u32 val;
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ if (netdev_uc_count(dev) > MAX_UNICAST_ADDRESSES) {
+ val |= MACTCTRL_UNI2CPU;
+ } else {
+ int reg = 0;
+ int i;
+ struct netdev_hw_addr *ha;
+
+ for (i = reg; i < MAX_UNICAST_ADDRESSES; i++)
+ hisi_femac_enable_hw_addr_filter(priv, i, false);
+
+ netdev_for_each_uc_addr(ha, dev) {
+ hisi_femac_set_hw_addr_filter(priv, ha->addr, reg);
+ reg++;
+ }
+ val &= ~MACTCTRL_UNI2CPU;
+ }
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+}
+
+static void hisi_femac_net_set_rx_mode(struct net_device *dev)
+{
+ struct hisi_femac_priv *priv = netdev_priv(dev);
+
+ if (dev->flags & IFF_PROMISC) {
+ hisi_femac_set_promisc_mode(priv, true);
+ } else {
+ hisi_femac_set_promisc_mode(priv, false);
+ hisi_femac_set_mc_addr_filter(priv);
+ hisi_femac_set_uc_addr_filter(priv);
+ }
+}
+
+static const struct ethtool_ops hisi_femac_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static const struct net_device_ops hisi_femac_netdev_ops = {
+ .ndo_open = hisi_femac_net_open,
+ .ndo_stop = hisi_femac_net_close,
+ .ndo_start_xmit = hisi_femac_net_xmit,
+ .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_set_mac_address = hisi_femac_set_mac_address,
+ .ndo_set_rx_mode = hisi_femac_net_set_rx_mode,
+};
+
+static void hisi_femac_core_reset(struct hisi_femac_priv *priv)
+{
+ reset_control_assert(priv->mac_rst);
+ reset_control_deassert(priv->mac_rst);
+}
+
+static void hisi_femac_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
+static void hisi_femac_phy_reset(struct hisi_femac_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds, first keep the
+ * PHY in the deasserted state and then perform the hardware
+ * reset operation.
+ */
+ reset_control_deassert(priv->phy_rst);
+ hisi_femac_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* delay some time to make sure the reset has completed;
+ * the required time depends on the PHY hardware
+ */
+ hisi_femac_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* delay some time to ensure later MDIO access */
+ hisi_femac_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
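+/* One-time port setup: let software report link status to the MAC, select
+ * RMII when the PHY uses it, and program the maximum frame size, the RX
+ * interrupt coalescing parameters and the TX/RX FIFO queue depths.
+ */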
+static void hisi_femac_port_init(struct hisi_femac_priv *priv)
+{
+ u32 val;
+
+ /* MAC gets link status info and phy mode by software config */
+ val = MAC_PORTSEL_STAT_CPU;
+ if (priv->ndev->phydev->interface == PHY_INTERFACE_MODE_RMII)
+ val |= MAC_PORTSEL_RMII;
+ writel(val, priv->port_base + MAC_PORTSEL);
+
+ /* clear all interrupt status */
+ writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
+ hisi_femac_irq_disable(priv, IRQ_ENA_PORT0_MASK | IRQ_ENA_PORT0);
+
+ val = readl(priv->glb_base + GLB_FWCTRL);
+ val &= ~(FWCTRL_VLAN_ENABLE | FWCTRL_FWALL2CPU);
+ val |= FWCTRL_FW2CPU_ENA;
+ writel(val, priv->glb_base + GLB_FWCTRL);
+
+ val = readl(priv->glb_base + GLB_MACTCTRL);
+ val |= (MACTCTRL_BROAD2CPU | MACTCTRL_MACT_ENA);
+ writel(val, priv->glb_base + GLB_MACTCTRL);
+
+ val = readl(priv->port_base + MAC_SET);
+ val &= ~MAX_FRAME_SIZE_MASK;
+ val |= MAX_FRAME_SIZE;
+ writel(val, priv->port_base + MAC_SET);
+
+ val = RX_COALESCED_TIMER |
+ (RX_COALESCED_FRAMES << RX_COALESCED_FRAME_OFFSET);
+ writel(val, priv->port_base + RX_COALESCE_SET);
+
+ val = (HW_RX_FIFO_DEPTH << RX_DEPTH_OFFSET) | HW_TX_FIFO_DEPTH;
+ writel(val, priv->port_base + QLEN_SET);
+}
+
+static int hisi_femac_drv_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct net_device *ndev;
+ struct hisi_femac_priv *priv;
+ struct phy_device *phy;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->ndev = ndev;
+
+ priv->port_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->port_base)) {
+ ret = PTR_ERR(priv->port_base);
+ goto out_free_netdev;
+ }
+
+ priv->glb_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->glb_base)) {
+ ret = PTR_ERR(priv->glb_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ dev_err(dev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret) {
+ dev_err(dev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ priv->mac_rst = devm_reset_control_get(dev, "mac");
+ if (IS_ERR(priv->mac_rst)) {
+ ret = PTR_ERR(priv->mac_rst);
+ goto out_disable_clk;
+ }
+ hisi_femac_core_reset(priv);
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hisi_femac_phy_reset(priv);
+ }
+
+ phy = of_phy_get_and_connect(ndev, node, hisi_femac_adjust_link);
+ if (!phy) {
+ dev_err(dev, "connect to PHY failed!\n");
+ ret = -ENODEV;
+ goto out_disable_clk;
+ }
+
+ phy_attached_print(phy, "phy_id=0x%.8lx, phy_mode=%s\n",
+ (unsigned long)phy->phy_id,
+ phy_modes(phy->interface));
+
+ mac_addr = of_get_mac_address(node);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(dev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hisi_femac_netdev_ops;
+ ndev->ethtool_ops = &hisi_femac_ethtools_ops;
+ netif_napi_add(ndev, &priv->napi, hisi_femac_poll, FEMAC_POLL_WEIGHT);
+
+ hisi_femac_port_init(priv);
+
+ ret = hisi_femac_init_tx_and_rx_queues(priv);
+ if (ret)
+ goto out_disconnect_phy;
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ ret = -ENODEV;
+ goto out_disconnect_phy;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hisi_femac_interrupt,
+ IRQF_SHARED, pdev->name, ndev);
+ if (ret) {
+ dev_err(dev, "devm_request_irq %d failed!\n", ndev->irq);
+ goto out_disconnect_phy;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "register_netdev failed!\n");
+ goto out_disconnect_phy;
+ }
+
+ return ret;
+
+out_disconnect_phy:
+ netif_napi_del(&priv->napi);
+ phy_disconnect(phy);
+out_disable_clk:
+ clk_disable_unprepare(priv->clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hisi_femac_drv_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+
+ phy_disconnect(ndev->phydev);
+ clk_disable_unprepare(priv->clk);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int hisi_femac_drv_suspend(struct platform_device *pdev,
+ pm_message_t state)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ disable_irq(ndev->irq);
+ if (netif_running(ndev)) {
+ hisi_femac_net_close(ndev);
+ netif_device_detach(ndev);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static int hisi_femac_drv_resume(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hisi_femac_priv *priv = netdev_priv(ndev);
+
+ clk_prepare_enable(priv->clk);
+ if (priv->phy_rst)
+ hisi_femac_phy_reset(priv);
+
+ if (netif_running(ndev)) {
+ hisi_femac_port_init(priv);
+ hisi_femac_net_open(ndev);
+ netif_device_attach(ndev);
+ }
+ enable_irq(ndev->irq);
+
+ return 0;
+}
+#endif
+
+static const struct of_device_id hisi_femac_match[] = {
+ {.compatible = "hisilicon,hisi-femac-v1",},
+ {.compatible = "hisilicon,hisi-femac-v2",},
+ {.compatible = "hisilicon,hi3516cv300-femac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hisi_femac_match);
+
+static struct platform_driver hisi_femac_driver = {
+ .driver = {
+ .name = "hisi-femac",
+ .of_match_table = hisi_femac_match,
+ },
+ .probe = hisi_femac_drv_probe,
+ .remove = hisi_femac_drv_remove,
+#ifdef CONFIG_PM
+ .suspend = hisi_femac_drv_suspend,
+ .resume = hisi_femac_drv_resume,
+#endif
+};
+
+module_platform_driver(hisi_femac_driver);
+
+MODULE_DESCRIPTION("Hisilicon Fast Ethernet MAC driver");
+MODULE_AUTHOR("Dongpo Li <lidongpo@hisilicon.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hisi-femac");
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 000000000..43f3146ca
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1333 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/reset.h>
+#include <linux/clk.h>
+#include <linux/circ_buf.h>
+
+#define STATION_ADDR_LOW 0x0000
+#define STATION_ADDR_HIGH 0x0004
+#define MAC_DUPLEX_HALF_CTRL 0x0008
+#define MAX_FRM_SIZE 0x003c
+#define PORT_MODE 0x0040
+#define PORT_EN 0x0044
+#define BITS_TX_EN BIT(2)
+#define BITS_RX_EN BIT(1)
+#define REC_FILT_CONTROL 0x0064
+#define BIT_CRC_ERR_PASS BIT(5)
+#define BIT_PAUSE_FRM_PASS BIT(4)
+#define BIT_VLAN_DROP_EN BIT(3)
+#define BIT_BC_DROP_EN BIT(2)
+#define BIT_MC_MATCH_EN BIT(1)
+#define BIT_UC_MATCH_EN BIT(0)
+#define PORT_MC_ADDR_LOW 0x0068
+#define PORT_MC_ADDR_HIGH 0x006C
+#define CF_CRC_STRIP 0x01b0
+#define MODE_CHANGE_EN 0x01b4
+#define BIT_MODE_CHANGE_EN BIT(0)
+#define COL_SLOT_TIME 0x01c0
+#define RECV_CONTROL 0x01e0
+#define BIT_STRIP_PAD_EN BIT(3)
+#define BIT_RUNT_PKT_EN BIT(4)
+#define CONTROL_WORD 0x0214
+#define MDIO_SINGLE_CMD 0x03c0
+#define MDIO_SINGLE_DATA 0x03c4
+#define MDIO_CTRL 0x03cc
+#define MDIO_RDATA_STATUS 0x03d0
+
+#define MDIO_START BIT(20)
+#define MDIO_R_VALID BIT(0)
+#define MDIO_READ (BIT(17) | MDIO_START)
+#define MDIO_WRITE (BIT(16) | MDIO_START)
+
+#define RX_FQ_START_ADDR 0x0500
+#define RX_FQ_DEPTH 0x0504
+#define RX_FQ_WR_ADDR 0x0508
+#define RX_FQ_RD_ADDR 0x050c
+#define RX_FQ_VLDDESC_CNT 0x0510
+#define RX_FQ_ALEMPTY_TH 0x0514
+#define RX_FQ_REG_EN 0x0518
+#define BITS_RX_FQ_START_ADDR_EN BIT(2)
+#define BITS_RX_FQ_DEPTH_EN BIT(1)
+#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
+#define RX_FQ_ALFULL_TH 0x051c
+#define RX_BQ_START_ADDR 0x0520
+#define RX_BQ_DEPTH 0x0524
+#define RX_BQ_WR_ADDR 0x0528
+#define RX_BQ_RD_ADDR 0x052c
+#define RX_BQ_FREE_DESC_CNT 0x0530
+#define RX_BQ_ALEMPTY_TH 0x0534
+#define RX_BQ_REG_EN 0x0538
+#define BITS_RX_BQ_START_ADDR_EN BIT(2)
+#define BITS_RX_BQ_DEPTH_EN BIT(1)
+#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
+#define RX_BQ_ALFULL_TH 0x053c
+#define TX_BQ_START_ADDR 0x0580
+#define TX_BQ_DEPTH 0x0584
+#define TX_BQ_WR_ADDR 0x0588
+#define TX_BQ_RD_ADDR 0x058c
+#define TX_BQ_VLDDESC_CNT 0x0590
+#define TX_BQ_ALEMPTY_TH 0x0594
+#define TX_BQ_REG_EN 0x0598
+#define BITS_TX_BQ_START_ADDR_EN BIT(2)
+#define BITS_TX_BQ_DEPTH_EN BIT(1)
+#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
+#define TX_BQ_ALFULL_TH 0x059c
+#define TX_RQ_START_ADDR 0x05a0
+#define TX_RQ_DEPTH 0x05a4
+#define TX_RQ_WR_ADDR 0x05a8
+#define TX_RQ_RD_ADDR 0x05ac
+#define TX_RQ_FREE_DESC_CNT 0x05b0
+#define TX_RQ_ALEMPTY_TH 0x05b4
+#define TX_RQ_REG_EN 0x05b8
+#define BITS_TX_RQ_START_ADDR_EN BIT(2)
+#define BITS_TX_RQ_DEPTH_EN BIT(1)
+#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
+#define TX_RQ_ALFULL_TH 0x05bc
+#define RAW_PMU_INT 0x05c0
+#define ENA_PMU_INT 0x05c4
+#define STATUS_PMU_INT 0x05c8
+#define MAC_FIFO_ERR_IN BIT(30)
+#define TX_RQ_IN_TIMEOUT_INT BIT(29)
+#define RX_BQ_IN_TIMEOUT_INT BIT(28)
+#define TXOUTCFF_FULL_INT BIT(27)
+#define TXOUTCFF_EMPTY_INT BIT(26)
+#define TXCFF_FULL_INT BIT(25)
+#define TXCFF_EMPTY_INT BIT(24)
+#define RXOUTCFF_FULL_INT BIT(23)
+#define RXOUTCFF_EMPTY_INT BIT(22)
+#define RXCFF_FULL_INT BIT(21)
+#define RXCFF_EMPTY_INT BIT(20)
+#define TX_RQ_IN_INT BIT(19)
+#define TX_BQ_OUT_INT BIT(18)
+#define RX_BQ_IN_INT BIT(17)
+#define RX_FQ_OUT_INT BIT(16)
+#define TX_RQ_EMPTY_INT BIT(15)
+#define TX_RQ_FULL_INT BIT(14)
+#define TX_RQ_ALEMPTY_INT BIT(13)
+#define TX_RQ_ALFULL_INT BIT(12)
+#define TX_BQ_EMPTY_INT BIT(11)
+#define TX_BQ_FULL_INT BIT(10)
+#define TX_BQ_ALEMPTY_INT BIT(9)
+#define TX_BQ_ALFULL_INT BIT(8)
+#define RX_BQ_EMPTY_INT BIT(7)
+#define RX_BQ_FULL_INT BIT(6)
+#define RX_BQ_ALEMPTY_INT BIT(5)
+#define RX_BQ_ALFULL_INT BIT(4)
+#define RX_FQ_EMPTY_INT BIT(3)
+#define RX_FQ_FULL_INT BIT(2)
+#define RX_FQ_ALEMPTY_INT BIT(1)
+#define RX_FQ_ALFULL_INT BIT(0)
+
+#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
+ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
+
+#define DESC_WR_RD_ENA 0x05cc
+#define IN_QUEUE_TH 0x05d8
+#define OUT_QUEUE_TH 0x05dc
+#define QUEUE_TX_BQ_SHIFT 16
+#define RX_BQ_IN_TIMEOUT_TH 0x05e0
+#define TX_RQ_IN_TIMEOUT_TH 0x05e4
+#define STOP_CMD 0x05e8
+#define BITS_TX_STOP BIT(1)
+#define BITS_RX_STOP BIT(0)
+#define FLUSH_CMD 0x05ec
+#define BITS_TX_FLUSH_CMD BIT(5)
+#define BITS_RX_FLUSH_CMD BIT(4)
+#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
+#define BITS_TX_FLUSH_FLAG_UP BIT(2)
+#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
+#define BITS_RX_FLUSH_FLAG_UP BIT(0)
+#define RX_CFF_NUM_REG 0x05f0
+#define PMU_FSM_REG 0x05f8
+#define RX_FIFO_PKT_IN_NUM 0x05fc
+#define RX_FIFO_PKT_OUT_NUM 0x0600
+
+#define RGMII_SPEED_1000 0x2c
+#define RGMII_SPEED_100 0x2f
+#define RGMII_SPEED_10 0x2d
+#define MII_SPEED_100 0x0f
+#define MII_SPEED_10 0x0d
+#define GMAC_SPEED_1000 0x05
+#define GMAC_SPEED_100 0x01
+#define GMAC_SPEED_10 0x00
+#define GMAC_FULL_DUPLEX BIT(4)
+
+#define RX_BQ_INT_THRESHOLD 0x01
+#define TX_RQ_INT_THRESHOLD 0x01
+#define RX_BQ_IN_TIMEOUT 0x10000
+#define TX_RQ_IN_TIMEOUT 0x50000
+
+#define MAC_MAX_FRAME_SIZE 1600
+#define DESC_SIZE 32
+#define RX_DESC_NUM 1024
+#define TX_DESC_NUM 1024
+
+#define DESC_VLD_FREE 0
+#define DESC_VLD_BUSY 0x80000000
+#define DESC_FL_MID 0
+#define DESC_FL_LAST 0x20000000
+#define DESC_FL_FIRST 0x40000000
+#define DESC_FL_FULL 0x60000000
+#define DESC_DATA_LEN_OFF 16
+#define DESC_BUFF_LEN_OFF 0
+#define DESC_DATA_MASK 0x7ff
+#define DESC_SG BIT(30)
+#define DESC_FRAGS_NUM_OFF 11
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_cnt(n) ((n) >> 5)
+#define dma_byte(n) ((n) << 5)
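+/* The hardware queue pointer registers hold byte offsets into the rings.
+ * Since each descriptor is DESC_SIZE (32) bytes, dma_cnt() converts a byte
+ * offset to a descriptor index (dma_cnt(0x40) == 2) and dma_byte() converts
+ * back (dma_byte(2) == 0x40). dma_ring_incr() relies on the ring sizes
+ * being powers of two, so e.g. dma_ring_incr(1023, 1024) wraps to 0.
+ */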
+
+#define HW_CAP_TSO BIT(0)
+#define GEMAC_V1 0
+#define GEMAC_V2 (GEMAC_V1 | HW_CAP_TSO)
+#define HAS_CAP_TSO(hw_cap) ((hw_cap) & HW_CAP_TSO)
+
+#define PHY_RESET_DELAYS_PROPERTY "hisilicon,phy-reset-delays-us"
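+/* When a "phy" reset line is provided, this DT property supplies three
+ * values, in microseconds: the delay before asserting the PHY reset, the
+ * reset pulse width, and the delay after deassertion before the first
+ * MDIO access.
+ */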
+
+enum phy_reset_delays {
+ PRE_DELAY,
+ PULSE,
+ POST_DELAY,
+ DELAYS_NUM,
+};
+
+struct hix5hd2_desc {
+ __le32 buff_addr;
+ __le32 cmd;
+} __aligned(32);
+
+struct hix5hd2_desc_sw {
+ struct hix5hd2_desc *desc;
+ dma_addr_t phys_addr;
+ unsigned int count;
+ unsigned int size;
+};
+
+struct hix5hd2_sg_desc_ring {
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+};
+
+struct frags_info {
+ __le32 addr;
+ __le32 size;
+};
+
+/* maximum number of skb frags supported by the hardware */
+#define SG_MAX_SKB_FRAGS 17
+struct sg_desc {
+ __le32 total_len;
+ __le32 resvd0;
+ __le32 linear_addr;
+ __le32 linear_len;
+ /* one extra frag entry is reserved for memory alignment */
+ struct frags_info frags[SG_MAX_SKB_FRAGS + 1];
+};
+
+#define QUEUE_NUMS 4
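+/* Each port uses four descriptor queues: the RX free queue (rx_fq) holds
+ * empty buffers handed to the hardware, the RX busy queue (rx_bq) returns
+ * received frames, the TX busy queue (tx_bq) carries frames queued for
+ * transmission, and the TX reclaim queue (tx_rq) returns completed transmit
+ * descriptors so their buffers can be unmapped and freed.
+ */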
+struct hix5hd2_priv {
+ struct hix5hd2_desc_sw pool[QUEUE_NUMS];
+#define rx_fq pool[0]
+#define rx_bq pool[1]
+#define tx_bq pool[2]
+#define tx_rq pool[3]
+ struct hix5hd2_sg_desc_ring tx_ring;
+
+ void __iomem *base;
+ void __iomem *ctrl_base;
+
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ struct sk_buff *rx_skb[RX_DESC_NUM];
+
+ struct device *dev;
+ struct net_device *netdev;
+
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+
+ unsigned long hw_cap;
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct clk *mac_core_clk;
+ struct clk *mac_ifc_clk;
+ struct reset_control *mac_core_rst;
+ struct reset_control *mac_ifc_rst;
+ struct reset_control *phy_rst;
+ u32 phy_reset_delays[DELAYS_NUM];
+ struct mii_bus *bus;
+ struct napi_struct napi;
+ struct work_struct tx_timeout_task;
+};
+
+static inline void hix5hd2_mac_interface_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_ifc_rst)
+ return;
+
+ reset_control_assert(priv->mac_ifc_rst);
+ reset_control_deassert(priv->mac_ifc_rst);
+}
+
+static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ if (speed == SPEED_1000)
+ val = RGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = RGMII_SPEED_100;
+ else
+ val = RGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+ netdev_warn(dev, "not supported mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+
+ if (duplex)
+ val |= GMAC_FULL_DUPLEX;
+ writel_relaxed(val, priv->ctrl_base);
+ hix5hd2_mac_interface_reset(priv);
+
+ writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
+ if (speed == SPEED_1000)
+ val = GMAC_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = GMAC_SPEED_100;
+ else
+ val = GMAC_SPEED_10;
+ writel_relaxed(val, priv->base + PORT_MODE);
+ writel_relaxed(0, priv->base + MODE_CHANGE_EN);
+ writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
+}
+
+static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
+{
+ writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+
+ writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
+{
+ hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
+ hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
+ hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
+ hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
+}
+
+static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
+{
+ u32 val;
+
+ /* disable and clear all interrupts */
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+ writel_relaxed(~0, priv->base + RAW_PMU_INT);
+
+ writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
+ writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
+ writel_relaxed(0, priv->base + COL_SLOT_TIME);
+
+ val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
+ writel_relaxed(val, priv->base + IN_QUEUE_TH);
+
+ writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
+ writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
+
+ hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
+ hix5hd2_set_desc_addr(priv);
+}
+
+static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
+ writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
+}
+
+static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(~(u32)(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
+}
+
+static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned char *mac = dev->dev_addr;
+ u32 val;
+
+ val = mac[1] | (mac[0] << 8);
+ writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
+
+ val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel_relaxed(val, priv->base + STATION_ADDR_LOW);
+}
+
+static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (!ret)
+ hix5hd2_hw_set_mac_addr(dev);
+
+ return ret;
+}
+
+static void hix5hd2_adjust_link(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = dev->phydev;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hix5hd2_config_port(dev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ struct sk_buff *skb;
+ u32 start, end, num, pos, i;
+ u32 len = MAC_MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ /* software write pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
+ /* logic read pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
+ num = CIRC_SPACE(start, end, RX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ if (priv->rx_skb[pos]) {
+ break;
+ } else {
+ skb = netdev_alloc_skb_ip_align(priv->netdev, len);
+ if (unlikely(skb == NULL))
+ break;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ desc = priv->rx_fq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->rx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_FREE |
+ (len - 1) << DESC_BUFF_LEN_OFF);
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ /* ensure desc updated */
+ wmb();
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
+}
+
+static int hix5hd2_rx(struct net_device *dev, int limit)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 start, end, num, pos, i, len;
+
+ /* software read pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
+ /* logic write pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
+ num = CIRC_CNT(end, start, RX_DESC_NUM);
+ if (num > limit)
+ num = limit;
+
+ /* make sure we read up-to-date descriptors */
+ rmb();
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->rx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent rx_skb\n");
+ break;
+ }
+ priv->rx_skb[pos] = NULL;
+
+ desc = priv->rx_bq.desc + pos;
+ len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
+ DESC_DATA_MASK;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ if (skb->len > MAC_MAX_FRAME_SIZE) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+next:
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
+
+ hix5hd2_rx_refill(priv);
+
+ return num;
+}
+
+static void hix5hd2_clean_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ u32 len;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ addr = le32_to_cpu(desc->linear_addr);
+ len = le32_to_cpu(desc->linear_len);
+ dma_unmap_single(priv->dev, addr, len, DMA_TO_DEVICE);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ addr = le32_to_cpu(desc->frags[i].addr);
+ len = le32_to_cpu(desc->frags[i].size);
+ dma_unmap_page(priv->dev, addr, len, DMA_TO_DEVICE);
+ }
+}
+
+static void hix5hd2_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 start, end, num, pos, i;
+ dma_addr_t addr;
+
+ netif_tx_lock(dev);
+
+ /* software read */
+ start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
+ /* logic write */
+ end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
+ num = CIRC_CNT(end, start, TX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->tx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent tx_skb\n");
+ break;
+ }
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+ desc = priv->tx_rq.desc + pos;
+
+ if (skb_shinfo(skb)->nr_frags) {
+ hix5hd2_clean_sg_desc(priv, skb, pos);
+ } else {
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len,
+ DMA_TO_DEVICE);
+ }
+
+ priv->tx_skb[pos] = NULL;
+ dev_consume_skb_any(skb);
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
+
+ netif_tx_unlock(dev);
+
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
+ netif_wake_queue(priv->netdev);
+}
+
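+/* NAPI poll: reclaim completed TX descriptors and receive frames until the
+ * budget is exhausted or no work remains; the raw interrupt status is
+ * re-checked each pass so events that arrive while polling are not lost
+ * before interrupts are re-enabled.
+ */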
+static int hix5hd2_poll(struct napi_struct *napi, int budget)
+{
+ struct hix5hd2_priv *priv = container_of(napi,
+ struct hix5hd2_priv, napi);
+ struct net_device *dev = priv->netdev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hix5hd2_xmit_reclaim(dev);
+ num = hix5hd2_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if ((work_done >= budget) || (num == 0))
+ break;
+
+ ints = readl_relaxed(priv->base + RAW_PMU_INT);
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ hix5hd2_irq_enable(priv);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ints = readl_relaxed(priv->base + RAW_PMU_INT);
+
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ if (likely(ints & DEF_INT_MASK)) {
+ hix5hd2_irq_disable(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
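+/* Build the TX descriptor command word: on TSO-capable MACs a scatter-gather
+ * frame sets DESC_SG and encodes its fragment count, otherwise the frame is
+ * sent as a single full buffer (DESC_FL_FULL); the packet length and the
+ * VLD_BUSY ownership bit are set in both cases.
+ */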
+static u32 hix5hd2_get_desc_cmd(struct sk_buff *skb, unsigned long hw_cap)
+{
+ u32 cmd = 0;
+
+ if (HAS_CAP_TSO(hw_cap)) {
+ if (skb_shinfo(skb)->nr_frags)
+ cmd |= DESC_SG;
+ cmd |= skb_shinfo(skb)->nr_frags << DESC_FRAGS_NUM_OFF;
+ } else {
+ cmd |= DESC_FL_FULL |
+ ((skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+ }
+
+ cmd |= (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF;
+ cmd |= DESC_VLD_BUSY;
+
+ return cmd;
+}
+
+static int hix5hd2_fill_sg_desc(struct hix5hd2_priv *priv,
+ struct sk_buff *skb, u32 pos)
+{
+ struct sg_desc *desc;
+ dma_addr_t addr;
+ int ret;
+ int i;
+
+ desc = priv->tx_ring.desc + pos;
+
+ desc->total_len = cpu_to_le32(skb->len);
+ addr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr)))
+ return -EINVAL;
+ desc->linear_addr = cpu_to_le32(addr);
+ desc->linear_len = cpu_to_le32(skb_headlen(skb));
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+
+ addr = skb_frag_dma_map(priv->dev, frag, 0, len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(priv->dev, addr);
+ if (unlikely(ret))
+ return -EINVAL;
+ desc->frags[i].addr = cpu_to_le32(addr);
+ desc->frags[i].size = cpu_to_le32(len);
+ }
+
+ return 0;
+}
+
+static netdev_tx_t hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 pos;
+ u32 cmd;
+ int ret;
+
+ /* software write pointer */
+ pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
+ if (unlikely(priv->tx_skb[pos])) {
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ desc = priv->tx_bq.desc + pos;
+
+ cmd = hix5hd2_get_desc_cmd(skb, priv->hw_cap);
+ desc->cmd = cpu_to_le32(cmd);
+
+ if (skb_shinfo(skb)->nr_frags) {
+ ret = hix5hd2_fill_sg_desc(priv, skb, pos);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ addr = priv->tx_ring.phys_addr + pos * sizeof(struct sg_desc);
+ } else {
+ addr = dma_map_single(priv->dev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(priv->dev, addr))) {
+ dev_kfree_skb_any(skb);
+ dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
+ }
+ desc->buff_addr = cpu_to_le32(addr);
+
+ priv->tx_skb[pos] = skb;
+
+ /* ensure desc updated */
+ wmb();
+
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
+
+ netif_trans_update(dev);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->rx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->rx_fq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr,
+ MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ }
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->tx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->tx_rq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ }
+}
+
+static int hix5hd2_net_open(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy;
+ int ret;
+
+ ret = clk_prepare_enable(priv->mac_core_clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable mac core clk %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
+ if (ret < 0) {
+ clk_disable_unprepare(priv->mac_core_clk);
+ netdev_err(dev, "failed to enable mac ifc clk %d\n", ret);
+ return ret;
+ }
+
+ phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+ if (!phy) {
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
+ return -ENODEV;
+ }
+
+ phy_start(phy);
+ hix5hd2_hw_init(priv);
+ hix5hd2_rx_refill(priv);
+
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ hix5hd2_port_enable(priv);
+ hix5hd2_irq_enable(priv);
+
+ return 0;
+}
+
+static int hix5hd2_net_close(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ hix5hd2_port_disable(priv);
+ hix5hd2_irq_disable(priv);
+ napi_disable(&priv->napi);
+ netif_stop_queue(dev);
+ hix5hd2_free_dma_desc_rings(priv);
+
+ if (dev->phydev) {
+ phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
+ }
+
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
+
+ return 0;
+}
+
+static void hix5hd2_tx_timeout_task(struct work_struct *work)
+{
+ struct hix5hd2_priv *priv;
+
+ priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
+ hix5hd2_net_close(priv->netdev);
+ hix5hd2_net_open(priv->netdev);
+}
+
+static void hix5hd2_net_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static const struct net_device_ops hix5hd2_netdev_ops = {
+ .ndo_open = hix5hd2_net_open,
+ .ndo_stop = hix5hd2_net_close,
+ .ndo_start_xmit = hix5hd2_net_xmit,
+ .ndo_tx_timeout = hix5hd2_net_timeout,
+ .ndo_set_mac_address = hix5hd2_net_set_mac_address,
+};
+
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int i, timeout = 10000;
+
+ for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
+ if (i == timeout)
+ return -ETIMEDOUT;
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
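+/* MDIO access goes through a single command register: the PHY address and
+ * register are written to MDIO_SINGLE_CMD together with MDIO_READ or
+ * MDIO_WRITE, the MDIO_START bit is polled until the controller finishes,
+ * and for reads MDIO_RDATA_STATUS is checked before the result is taken
+ * from the upper half of MDIO_SINGLE_DATA.
+ */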
+static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int val, ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(base + MDIO_RDATA_STATUS);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
+ ret = (val >> 16) & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(val, base + MDIO_SINGLE_DATA);
+ writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+out:
+ return ret;
+}
+
+static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ if (priv->pool[i].desc) {
+ dma_free_coherent(priv->dev, priv->pool[i].size,
+ priv->pool[i].desc,
+ priv->pool[i].phys_addr);
+ priv->pool[i].desc = NULL;
+ }
+ }
+}
+
+static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct hix5hd2_desc *virt_addr;
+ dma_addr_t phys_addr;
+ int size, i;
+
+ priv->rx_fq.count = RX_DESC_NUM;
+ priv->rx_bq.count = RX_DESC_NUM;
+ priv->tx_bq.count = TX_DESC_NUM;
+ priv->tx_rq.count = TX_DESC_NUM;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
+ if (virt_addr == NULL)
+ goto error_free_pool;
+
+ priv->pool[i].size = size;
+ priv->pool[i].desc = virt_addr;
+ priv->pool[i].phys_addr = phys_addr;
+ }
+ return 0;
+
+error_free_pool:
+ hix5hd2_destroy_hw_desc_queue(priv);
+
+ return -ENOMEM;
+}
+
+static int hix5hd2_init_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct sg_desc *desc;
+ dma_addr_t phys_addr;
+
+ desc = dma_alloc_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ &phys_addr, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ priv->tx_ring.desc = desc;
+ priv->tx_ring.phys_addr = phys_addr;
+
+ return 0;
+}
+
+static void hix5hd2_destroy_sg_desc_queue(struct hix5hd2_priv *priv)
+{
+ if (priv->tx_ring.desc) {
+ dma_free_coherent(priv->dev,
+ TX_DESC_NUM * sizeof(struct sg_desc),
+ priv->tx_ring.desc, priv->tx_ring.phys_addr);
+ priv->tx_ring.desc = NULL;
+ }
+}
+
+static inline void hix5hd2_mac_core_reset(struct hix5hd2_priv *priv)
+{
+ if (!priv->mac_core_rst)
+ return;
+
+ reset_control_assert(priv->mac_core_rst);
+ reset_control_deassert(priv->mac_core_rst);
+}
+
+static void hix5hd2_sleep_us(u32 time_us)
+{
+ u32 time_ms;
+
+ if (!time_us)
+ return;
+
+ time_ms = DIV_ROUND_UP(time_us, 1000);
+ if (time_ms < 20)
+ usleep_range(time_us, time_us + 500);
+ else
+ msleep(time_ms);
+}
+
+static void hix5hd2_phy_reset(struct hix5hd2_priv *priv)
+{
+ /* To make sure the PHY hardware reset succeeds, the PHY must
+ * first be held in the deasserted state and then put through
+ * the full reset sequence.
+ */
+ reset_control_deassert(priv->phy_rst);
+ hix5hd2_sleep_us(priv->phy_reset_delays[PRE_DELAY]);
+
+ reset_control_assert(priv->phy_rst);
+ /* hold the reset asserted long enough for it to take effect;
+ * the required pulse width depends on the PHY hardware
+ */
+ hix5hd2_sleep_us(priv->phy_reset_delays[PULSE]);
+ reset_control_deassert(priv->phy_rst);
+ /* wait for the PHY to be ready before the first MDIO access */
+ hix5hd2_sleep_us(priv->phy_reset_delays[POST_DELAY]);
+}
+
+static const struct of_device_id hix5hd2_of_match[];
+
+static int hix5hd2_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ const struct of_device_id *of_id = NULL;
+ struct net_device *ndev;
+ struct hix5hd2_priv *priv;
+ struct mii_bus *bus;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ of_id = of_match_device(hix5hd2_of_match, dev);
+ if (!of_id) {
+ ret = -EINVAL;
+ goto out_free_netdev;
+ }
+ priv->hw_cap = (unsigned long)of_id->data;
+
+ priv->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_free_netdev;
+ }
+
+ priv->ctrl_base = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(priv->ctrl_base)) {
+ ret = PTR_ERR(priv->ctrl_base);
+ goto out_free_netdev;
+ }
+
+ priv->mac_core_clk = devm_clk_get(&pdev->dev, "mac_core");
+ if (IS_ERR(priv->mac_core_clk)) {
+ netdev_err(ndev, "failed to get mac core clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->mac_core_clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable mac core clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ priv->mac_ifc_clk = devm_clk_get(&pdev->dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_clk))
+ priv->mac_ifc_clk = NULL;
+
+ ret = clk_prepare_enable(priv->mac_ifc_clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable mac ifc clk %d\n", ret);
+ goto out_disable_mac_core_clk;
+ }
+
+ priv->mac_core_rst = devm_reset_control_get(dev, "mac_core");
+ if (IS_ERR(priv->mac_core_rst))
+ priv->mac_core_rst = NULL;
+ hix5hd2_mac_core_reset(priv);
+
+ priv->mac_ifc_rst = devm_reset_control_get(dev, "mac_ifc");
+ if (IS_ERR(priv->mac_ifc_rst))
+ priv->mac_ifc_rst = NULL;
+
+ priv->phy_rst = devm_reset_control_get(dev, "phy");
+ if (IS_ERR(priv->phy_rst)) {
+ priv->phy_rst = NULL;
+ } else {
+ ret = of_property_read_u32_array(node,
+ PHY_RESET_DELAYS_PROPERTY,
+ priv->phy_reset_delays,
+ DELAYS_NUM);
+ if (ret)
+ goto out_disable_clk;
+ hix5hd2_phy_reset(priv);
+ }
+
+ bus = mdiobus_alloc();
+ if (bus == NULL) {
+ ret = -ENOMEM;
+ goto out_disable_clk;
+ }
+
+ bus->priv = priv;
+ bus->name = "hix5hd2_mii_bus";
+ bus->read = hix5hd2_mdio_read;
+ bus->write = hix5hd2_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ priv->bus = bus;
+
+ ret = of_mdiobus_register(bus, node);
+ if (ret)
+ goto err_free_mdio;
+
+ ret = of_get_phy_mode(node, &priv->phy_mode);
+ if (ret) {
+ netdev_err(ndev, "not find phy-mode\n");
+ goto err_mdiobus;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ netdev_err(ndev, "not find phy-handle\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = -EINVAL;
+ goto out_phy_node;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto out_phy_node;
+ }
+
+ mac_addr = of_get_mac_address(node);
+ if (!IS_ERR(mac_addr))
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ netdev_warn(ndev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hix5hd2_netdev_ops;
+ ndev->ethtool_ops = &hix5hd2_ethtools_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (HAS_CAP_TSO(priv->hw_cap))
+ ndev->hw_features |= NETIF_F_SG;
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->vlan_features |= ndev->features;
+
+ ret = hix5hd2_init_hw_desc_queue(priv);
+ if (ret)
+ goto out_phy_node;
+
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+
+ if (HAS_CAP_TSO(priv->hw_cap)) {
+ ret = hix5hd2_init_sg_desc_queue(priv);
+ if (ret)
+ goto out_destroy_queue;
+ }
+
+ ret = register_netdev(priv->netdev);
+ if (ret) {
+ netdev_err(ndev, "register_netdev failed!");
+ goto out_destroy_queue;
+ }
+
+ clk_disable_unprepare(priv->mac_ifc_clk);
+ clk_disable_unprepare(priv->mac_core_clk);
+
+ return ret;
+
+out_destroy_queue:
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
+ netif_napi_del(&priv->napi);
+ hix5hd2_destroy_hw_desc_queue(priv);
+out_phy_node:
+ of_node_put(priv->phy_node);
+err_mdiobus:
+ mdiobus_unregister(bus);
+err_free_mdio:
+ mdiobus_free(bus);
+out_disable_clk:
+ clk_disable_unprepare(priv->mac_ifc_clk);
+out_disable_mac_core_clk:
+ clk_disable_unprepare(priv->mac_core_clk);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hix5hd2_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hix5hd2_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+ mdiobus_unregister(priv->bus);
+ mdiobus_free(priv->bus);
+
+ if (HAS_CAP_TSO(priv->hw_cap))
+ hix5hd2_destroy_sg_desc_queue(priv);
+ hix5hd2_destroy_hw_desc_queue(priv);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hix5hd2_of_match[] = {
+ { .compatible = "hisilicon,hisi-gmac-v1", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hisi-gmac-v2", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hix5hd2-gmac", .data = (void *)GEMAC_V1 },
+ { .compatible = "hisilicon,hi3798cv200-gmac", .data = (void *)GEMAC_V2 },
+ { .compatible = "hisilicon,hi3516a-gmac", .data = (void *)GEMAC_V2 },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
+
+static struct platform_driver hix5hd2_dev_driver = {
+ .driver = {
+ .name = "hisi-gmac",
+ .of_match_table = hix5hd2_of_match,
+ },
+ .probe = hix5hd2_dev_probe,
+ .remove = hix5hd2_dev_remove,
+};
+
+module_platform_driver(hix5hd2_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON Gigabit Ethernet MAC driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hisi-gmac");
diff --git a/drivers/net/ethernet/hisilicon/hns/Makefile b/drivers/net/ethernet/hisilicon/hns/Makefile
new file mode 100644
index 000000000..7aa623b9c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HNS) += hnae.o
+
+obj-$(CONFIG_HNS_DSAF) += hns_dsaf.o
+hns_dsaf-objs = hns_ae_adapt.o hns_dsaf_gmac.o hns_dsaf_mac.o hns_dsaf_misc.o \
+ hns_dsaf_main.o hns_dsaf_ppe.o hns_dsaf_rcb.o hns_dsaf_xgmac.o
+
+obj-$(CONFIG_HNS_ENET) += hns_enet_drv.o
+hns_enet_drv-objs = hns_enet.o hns_ethtool.o
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
new file mode 100644
index 000000000..430eccea8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include "hnae.h"
+
+#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)
+
+static struct class *hnae_class;
+
+static void
+hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_add_tail_rcu(node, head);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static void hnae_list_del(spinlock_t *lock, struct list_head *node)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(lock, flags);
+ list_del_rcu(node);
+ spin_unlock_irqrestore(lock, flags);
+}
+
+static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ unsigned int order = hnae_page_order(ring);
+ struct page *p = dev_alloc_pages(order);
+
+ if (!p)
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->page_offset = 0;
+ cb->reuse_flag = 0;
+ cb->buf = page_address(p);
+ cb->length = hnae_page_size(ring);
+ cb->type = DESC_TYPE_PAGE;
+
+ return 0;
+}
+
+static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (unlikely(!cb->priv))
+ return;
+
+ if (cb->type == DESC_TYPE_SKB)
+ dev_kfree_skb_any((struct sk_buff *)cb->priv);
+ else if (unlikely(is_rx_ring(ring)))
+ put_page((struct page *)cb->priv);
+
+ cb->priv = NULL;
+}
+
+static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+ cb->length, ring_to_dma_dir(ring));
+
+ if (dma_mapping_error(ring_to_dev(ring), cb->dma))
+ return -EIO;
+
+ return 0;
+}
+
+static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else if (cb->length)
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+}
+
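+/* Default buffer operations: each RX descriptor is backed by a freshly
+ * allocated page (DESC_TYPE_PAGE) mapped for the ring's DMA direction;
+ * SKB-type buffers are unmapped with dma_unmap_single() while page buffers
+ * use dma_unmap_page().
+ */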
+static struct hnae_buf_ops hnae_bops = {
+ .alloc_buffer = hnae_alloc_buffer,
+ .free_buffer = hnae_free_buffer,
+ .map_buffer = hnae_map_buffer,
+ .unmap_buffer = hnae_unmap_buffer,
+};
+
+static int __ae_match(struct device *dev, const void *data)
+{
+ struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);
+
+ if (dev_of_node(hdev->dev))
+ return (data == &hdev->dev->of_node->fwnode);
+ else if (is_acpi_node(hdev->dev->fwnode))
+ return (data == hdev->dev->fwnode);
+
+ dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
+ return 0;
+}
+
+static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ WARN_ON(!fwnode);
+
+ dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);
+
+ return dev ? cls_to_ae_dev(dev) : NULL;
+}
+
+static void hnae_free_buffers(struct hnae_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->desc_num; i++)
+ hnae_free_buffer_detach(ring, i);
+}
+
+/* Allocate memory for the raw packet buffers and map them for DMA */
+static int hnae_alloc_buffers(struct hnae_ring *ring)
+{
+ int i, j, ret;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ ret = hnae_alloc_buffer_attach(ring, i);
+ if (ret)
+ goto out_buffer_fail;
+ }
+
+ return 0;
+
+out_buffer_fail:
+ for (j = i - 1; j >= 0; j--)
+ hnae_free_buffer_detach(ring, j);
+ return ret;
+}
+
+/* free desc along with its attached buffer */
+static void hnae_free_desc(struct hnae_ring *ring)
+{
+ dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
+ ring->desc_num * sizeof(ring->desc[0]),
+ ring_to_dma_dir(ring));
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+}
+
+/* alloc desc, without buffer attached */
+static int hnae_alloc_desc(struct hnae_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ ring->desc = kzalloc(size, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
+ ring->desc, size, ring_to_dma_dir(ring));
+ if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
+ ring->desc_dma_addr = 0;
+ kfree(ring->desc);
+ ring->desc = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+/* fini ring, also free the buffer for the ring */
+static void hnae_fini_ring(struct hnae_ring *ring)
+{
+ if (is_rx_ring(ring))
+ hnae_free_buffers(ring);
+
+ hnae_free_desc(ring);
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+}
+
+/* init ring, and with buffer for rx ring */
+static int
+hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
+{
+ int ret;
+
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+ ring->q = q;
+ ring->flags = flags;
+ ring->coal_param = q->handle->coal_param;
+ assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);
+
+ /* for both tx and rx rings, ntc and ntu start from 0 */
+ assert(ring->next_to_use == 0);
+ assert(ring->next_to_clean == 0);
+
+ ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
+ GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hnae_alloc_desc(ring);
+ if (ret)
+ goto out_with_desc_cb;
+
+ if (is_rx_ring(ring)) {
+ ret = hnae_alloc_buffers(ring);
+ if (ret)
+ goto out_with_desc;
+ }
+
+ return 0;
+
+out_with_desc:
+ hnae_free_desc(ring);
+out_with_desc_cb:
+ kfree(ring->desc_cb);
+ ring->desc_cb = NULL;
+out:
+ return ret;
+}
+
+static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
+ struct hnae_ae_dev *dev)
+{
+ int ret;
+
+ q->dev = dev;
+ q->handle = h;
+
+ ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
+ if (ret)
+ goto out;
+
+ ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
+ if (ret)
+ goto out_with_tx_ring;
+
+ if (dev->ops->init_queue)
+ dev->ops->init_queue(q);
+
+ return 0;
+
+out_with_tx_ring:
+ hnae_fini_ring(&q->tx_ring);
+out:
+ return ret;
+}
+
+static void hnae_fini_queue(struct hnae_queue *q)
+{
+ if (q->dev->ops->fini_queue)
+ q->dev->ops->fini_queue(q);
+
+ hnae_fini_ring(&q->tx_ring);
+ hnae_fini_ring(&q->rx_ring);
+}
+
+/*
+ * ae_chain - define ae chain head
+ */
+static RAW_NOTIFIER_HEAD(ae_chain);
+
+int hnae_register_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&ae_chain, nb);
+}
+EXPORT_SYMBOL(hnae_register_notifier);
+
+void hnae_unregister_notifier(struct notifier_block *nb)
+{
+ if (raw_notifier_chain_unregister(&ae_chain, nb))
+ dev_err(NULL, "notifier chain unregister fail\n");
+}
+EXPORT_SYMBOL(hnae_unregister_notifier);
+
+int hnae_reinit_handle(struct hnae_handle *handle)
+{
+ int i, j;
+ int ret;
+
+ for (i = 0; i < handle->q_num; i++) /* free ring */
+ hnae_fini_queue(handle->qs[i]);
+
+ if (handle->dev->ops->reset)
+ handle->dev->ops->reset(handle);
+
+ for (i = 0; i < handle->q_num; i++) { /* reinit ring */
+ ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+ return 0;
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+ return ret;
+}
+EXPORT_SYMBOL(hnae_reinit_handle);
+
+/* hnae_get_handle - get a handle from the AE
+ * @owner_dev: the device that will use this handle
+ * @fwnode: fwnode of the AE device that provides the handle
+ * @port_id: the id of the port to be used
+ * @bops: the callbacks for buffer management
+ *
+ * return handle ptr or ERR_PTR
+ */
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const struct fwnode_handle *fwnode,
+ u32 port_id,
+ struct hnae_buf_ops *bops)
+{
+ struct hnae_ae_dev *dev;
+ struct hnae_handle *handle;
+ int i, j;
+ int ret;
+
+ dev = find_ae(fwnode);
+ if (!dev)
+ return ERR_PTR(-ENODEV);
+
+ handle = dev->ops->get_handle(dev, port_id);
+ if (IS_ERR(handle)) {
+ put_device(&dev->cls_dev);
+ return handle;
+ }
+
+ handle->dev = dev;
+ handle->owner_dev = owner_dev;
+ handle->bops = bops ? bops : &hnae_bops;
+ handle->eport_id = port_id;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hnae_init_queue(handle, handle->qs[i], dev);
+ if (ret)
+ goto out_when_init_queue;
+ }
+
+ __module_get(dev->owner);
+
+ hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);
+
+ return handle;
+
+out_when_init_queue:
+ for (j = i - 1; j >= 0; j--)
+ hnae_fini_queue(handle->qs[j]);
+
+ put_device(&dev->cls_dev);
+
+ return ERR_PTR(-ENOMEM);
+}
+EXPORT_SYMBOL(hnae_get_handle);
+
+void hnae_put_handle(struct hnae_handle *h)
+{
+ struct hnae_ae_dev *dev = h->dev;
+ int i;
+
+ for (i = 0; i < h->q_num; i++)
+ hnae_fini_queue(h->qs[i]);
+
+ if (h->dev->ops->reset)
+ h->dev->ops->reset(h);
+
+ hnae_list_del(&dev->lock, &h->node);
+
+ if (dev->ops->put_handle)
+ dev->ops->put_handle(h);
+
+ module_put(dev->owner);
+
+ put_device(&dev->cls_dev);
+}
+EXPORT_SYMBOL(hnae_put_handle);
+
+static void hnae_release(struct device *dev)
+{
+}
+
+/**
+ * hnae_ae_register - register an AE engine with the hnae framework
+ * @hdev: the hnae ae engine device
+ * @owner: the module that provides this device
+ * NOTE: duplicate names are not checked
+ */
+int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+{
+ static atomic_t id = ATOMIC_INIT(-1);
+ int ret;
+
+ if (!hdev->dev)
+ return -ENODEV;
+
+ if (!hdev->ops || !hdev->ops->get_handle ||
+ !hdev->ops->toggle_ring_irq ||
+ !hdev->ops->get_status || !hdev->ops->adjust_link)
+ return -EINVAL;
+
+ hdev->owner = owner;
+ hdev->id = (int)atomic_inc_return(&id);
+ hdev->cls_dev.parent = hdev->dev;
+ hdev->cls_dev.class = hnae_class;
+ hdev->cls_dev.release = hnae_release;
+ (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+ ret = device_register(&hdev->cls_dev);
+ if (ret) {
+ put_device(&hdev->cls_dev);
+ return ret;
+ }
+
+ __module_get(THIS_MODULE);
+
+ INIT_LIST_HEAD(&hdev->handle_list);
+ spin_lock_init(&hdev->lock);
+
+ ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
+ if (ret)
+ dev_dbg(hdev->dev,
+ "has not notifier for AE: %s\n", hdev->name);
+
+ return 0;
+}
+EXPORT_SYMBOL(hnae_ae_register);
+
+/**
+ * hnae_ae_unregister - unregisters a HNAE AE engine
+ * @hdev: the device to unregister
+ */
+void hnae_ae_unregister(struct hnae_ae_dev *hdev)
+{
+ device_unregister(&hdev->cls_dev);
+ module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL(hnae_ae_unregister);
+
+static int __init hnae_init(void)
+{
+ hnae_class = class_create(THIS_MODULE, "hnae");
+ return PTR_ERR_OR_ZERO(hnae_class);
+}
+
+static void __exit hnae_exit(void)
+{
+ class_destroy(hnae_class);
+}
+
+subsys_initcall(hnae_init);
+module_exit(hnae_exit);
+
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");
+
+/* vi: set tw=78 noet: */
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
new file mode 100644
index 000000000..6ab945830
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -0,0 +1,713 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef __HNAE_H
+#define __HNAE_H
+
+/* Names used in this framework:
+ * ae handle (handle):
+ * a set of queues provided by the AE
+ * ring buffer queue (rbq):
+ * the channel between the upper layer and the AE; it can do both tx and rx
+ * ring:
+ * a tx or rx channel within a rbq
+ * ring description (desc):
+ * an element in the ring with packet information
+ * buffer:
+ * a memory region referred to by a desc, holding the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" mean a dynamic
+ * number set while running
+ * "cb" means control block
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <linux/phy.h>
+#include <linux/types.h>
+
+#define HNAE_DRIVER_VERSION "2.0"
+#define HNAE_DRIVER_NAME "hns"
+#define HNAE_COPYRIGHT "Copyright(c) 2015 Huawei Corporation."
+#define HNAE_DRIVER_STRING "Hisilicon Network Subsystem Driver"
+#define HNAE_DEFAULT_DEVICE_DESCR "Hisilicon Network Subsystem"
+
+#ifdef DEBUG
+
+#ifndef assert
+#define assert(expr) \
+do { \
+ if (!(expr)) { \
+ pr_err("Assertion failed! %s, %s, %s, line %d\n", \
+ #expr, __FILE__, __func__, __LINE__); \
+ } \
+} while (0)
+#endif
+
+#else
+
+#ifndef assert
+#define assert(expr)
+#endif
+
+#endif
+
+#define AE_VERSION_1 ('6' << 16 | '6' << 8 | '0')
+#define AE_VERSION_2 ('1' << 24 | '6' << 16 | '1' << 8 | '0')
+#define AE_IS_VER1(ver) ((ver) == AE_VERSION_1)
+#define AE_NAME_SIZE 16
+
+#define BD_SIZE_2048_MAX_MTU 6000
+
+/* Some have said the RX and TX RCB formats should differ in the future,
+ * but they are the same for now.
+ */
+#define RCB_REG_BASEADDR_L 0x00 /* P660 supports only 32-bit accesses */
+#define RCB_REG_BASEADDR_H 0x04
+#define RCB_REG_BD_NUM 0x08
+#define RCB_REG_BD_LEN 0x0C
+#define RCB_REG_PKTLINE 0x10
+#define RCB_REG_TAIL 0x18
+#define RCB_REG_HEAD 0x1C
+#define RCB_REG_FBDNUM 0x20
+#define RCB_REG_OFFSET 0x24 /* pkt num to be handled */
+#define RCB_REG_PKTNUM_RECORD 0x2C /* total pkt received */
+
+#define HNS_RX_HEAD_SIZE 256
+
+#define HNAE_AE_REGISTER 0x1
+
+#define RCB_RING_NAME_LEN (IFNAMSIZ + 4)
+
+#define HNAE_LOWEST_LATENCY_COAL_PARAM 30
+#define HNAE_LOW_LATENCY_COAL_PARAM 80
+#define HNAE_BULK_LATENCY_COAL_PARAM 150
+
+enum hnae_led_state {
+ HNAE_LED_INACTIVE,
+ HNAE_LED_ACTIVE,
+ HNAE_LED_ON,
+ HNAE_LED_OFF
+};
+
+#define HNS_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS_RX_FLAG_L3ID_IPV4 0x0
+#define HNS_RX_FLAG_L3ID_IPV6 0x1
+#define HNS_RX_FLAG_L4ID_UDP 0x0
+#define HNS_RX_FLAG_L4ID_TCP 0x1
+#define HNS_RX_FLAG_L4ID_SCTP 0x3
+
+#define HNS_TXD_ASID_S 0
+#define HNS_TXD_ASID_M (0xff << HNS_TXD_ASID_S)
+#define HNS_TXD_BUFNUM_S 8
+#define HNS_TXD_BUFNUM_M (0x3 << HNS_TXD_BUFNUM_S)
+#define HNS_TXD_PORTID_S 10
+#define HNS_TXD_PORTID_M (0x7 << HNS_TXD_PORTID_S)
+
+#define HNS_TXD_RA_B 8
+#define HNS_TXD_RI_B 9
+#define HNS_TXD_L4CS_B 10
+#define HNS_TXD_L3CS_B 11
+#define HNS_TXD_FE_B 12
+#define HNS_TXD_VLD_B 13
+#define HNS_TXD_IPOFFSET_S 14
+#define HNS_TXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+
+#define HNS_RXD_IPOFFSET_S 0
+#define HNS_RXD_IPOFFSET_M (0xff << HNS_TXD_IPOFFSET_S)
+#define HNS_RXD_BUFNUM_S 8
+#define HNS_RXD_BUFNUM_M (0x3 << HNS_RXD_BUFNUM_S)
+#define HNS_RXD_PORTID_S 10
+#define HNS_RXD_PORTID_M (0x7 << HNS_RXD_PORTID_S)
+#define HNS_RXD_DMAC_S 13
+#define HNS_RXD_DMAC_M (0x3 << HNS_RXD_DMAC_S)
+#define HNS_RXD_VLAN_S 15
+#define HNS_RXD_VLAN_M (0x3 << HNS_RXD_VLAN_S)
+#define HNS_RXD_L3ID_S 17
+#define HNS_RXD_L3ID_M (0xf << HNS_RXD_L3ID_S)
+#define HNS_RXD_L4ID_S 21
+#define HNS_RXD_L4ID_M (0xf << HNS_RXD_L4ID_S)
+#define HNS_RXD_FE_B 25
+#define HNS_RXD_FRAG_B 26
+#define HNS_RXD_VLD_B 27
+#define HNS_RXD_L2E_B 28
+#define HNS_RXD_L3E_B 29
+#define HNS_RXD_L4E_B 30
+#define HNS_RXD_DROP_B 31
+
+#define HNS_RXD_VLANID_S 8
+#define HNS_RXD_VLANID_M (0xfff << HNS_RXD_VLANID_S)
+#define HNS_RXD_CFI_B 20
+#define HNS_RXD_PRI_S 21
+#define HNS_RXD_PRI_M (0x7 << HNS_RXD_PRI_S)
+#define HNS_RXD_ASID_S 24
+#define HNS_RXD_ASID_M (0xff << HNS_RXD_ASID_S)
+
+#define HNSV2_TXD_BUFNUM_S 0
+#define HNSV2_TXD_BUFNUM_M (0x7 << HNSV2_TXD_BUFNUM_S)
+#define HNSV2_TXD_PORTID_S 4
+#define HNSV2_TXD_PORTID_M (0x7 << HNSV2_TXD_PORTID_S)
+#define HNSV2_TXD_RI_B 1
+#define HNSV2_TXD_L4CS_B 2
+#define HNSV2_TXD_L3CS_B 3
+#define HNSV2_TXD_FE_B 4
+#define HNSV2_TXD_VLD_B 5
+
+#define HNSV2_TXD_TSE_B 0
+#define HNSV2_TXD_VLAN_EN_B 1
+#define HNSV2_TXD_SNAP_B 2
+#define HNSV2_TXD_IPV6_B 3
+#define HNSV2_TXD_SCTP_B 4
+
+/* hardware spec ring buffer format */
+struct __packed hnae_desc {
+ __le64 addr;
+ union {
+ struct {
+ union {
+ __le16 asid_bufnum_pid;
+ __le16 asid;
+ };
+ __le16 send_size;
+ union {
+ __le32 flag_ipoffset;
+ struct {
+ __u8 bn_pid;
+ __u8 ra_ri_cs_fe_vld;
+ __u8 ip_offset;
+ __u8 tse_vlan_snap_v6_sctp_nth;
+ };
+ };
+ __le16 mss;
+ __u8 l4_len;
+ __u8 reserved1;
+ __le16 paylen;
+ __u8 vmid;
+ __u8 qid;
+ __le32 reserved2[2];
+ } tx;
+
+ struct {
+ __le32 ipoff_bnum_pid_flag;
+ __le16 pkt_len;
+ __le16 size;
+ union {
+ __le32 vlan_pri_asid;
+ struct {
+ __le16 asid;
+ __le16 vlan_cfi_pri;
+ };
+ };
+ __le32 rss_hash;
+ __le32 reserved_1[2];
+ } rx;
+ };
+};
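+
+/* Illustrative sketch (not code taken from this driver): parsing the per-packet
+ * flag word of an RX descriptor with the hnae_get_field()/hnae_get_bit()
+ * helpers defined at the bottom of this header. The local names below are
+ * made up for the example only.
+ *
+ *	u32 flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ *	u32 l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
+ *	u32 l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
+ *	bool l3_err = hnae_get_bit(flag, HNS_RXD_L3E_B);
+ *
+ *	if (l3id == HNS_RX_FLAG_L3ID_IPV4 && l4id == HNS_RX_FLAG_L4ID_TCP)
+ *		handle_ipv4_tcp(flag, l3_err);	(hypothetical helper)
+ */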
+
+struct hnae_desc_cb {
+ dma_addr_t dma; /* dma address of this desc */
+ void *buf; /* cpu addr for a desc */
+
+ /* priv data for the desc, e.g. skb when used with the IP stack */
+ void *priv;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
+
+ u16 reuse_flag;
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
+};
+
+#define setflags(flags, bits) ((flags) |= (bits))
+#define unsetflags(flags, bits) ((flags) &= ~(bits))
+
+/* hnae_ring->flags fields */
+#define RINGF_DIR 0x1 /* TX or RX ring, set if TX */
+#define is_tx_ring(ring) ((ring)->flags & RINGF_DIR)
+#define is_rx_ring(ring) (!is_tx_ring(ring))
+#define ring_to_dma_dir(ring) (is_tx_ring(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+struct ring_stats {
+ u64 io_err_cnt;
+ u64 sw_err_cnt;
+ u64 seg_pkt_cnt;
+ union {
+ struct {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_err_cnt;
+ u64 restart_queue;
+ u64 tx_busy;
+ };
+ struct {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_err_cnt;
+ u64 reuse_pg_cnt;
+ u64 err_pkt_len;
+ u64 non_vld_descs;
+ u64 err_bd_num;
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ };
+ };
+};
+
+struct hnae_queue;
+
+struct hnae_ring {
+ u8 __iomem *io_base; /* base io address for the ring */
+ struct hnae_desc *desc; /* dma map address space */
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_queue *q;
+ int irq;
+ char ring_name[RCB_RING_NAME_LEN];
+
+ /* statistics */
+ struct ring_stats stats;
+
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+ u16 max_desc_num_per_pkt;
+ u16 max_raw_data_sz_per_desc;
+ u16 max_pkt_size;
+ int next_to_use; /* idx of next spare desc */
+
+ /* idx of the latest processed desc; the ring is empty when it equals
+ * next_to_use
+ */
+ int next_to_clean;
+
+ int flags; /* ring attribute */
+ int irq_init_flag;
+
+ /* total rx bytes since the last rx rate calculation */
+ u64 coal_last_rx_bytes;
+ unsigned long coal_last_jiffies;
+ u32 coal_param;
+ u32 coal_rx_rate; /* rx rate in MB */
+};
+
+#define ring_ptr_move_fw(ring, p) \
+ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
+
+enum hns_desc_type {
+ DESC_TYPE_SKB,
+ DESC_TYPE_PAGE,
+};
+
+#define assert_is_ring_idx(ring, idx) \
+ assert((idx) >= 0 && (idx) < (ring)->desc_num)
+
+/* the distance between [begin, end) in a ring buffer
+ * note: one slot is always kept unused between begin and end
+ */
+static inline int ring_dist(struct hnae_ring *ring, int begin, int end)
+{
+ assert_is_ring_idx(ring, begin);
+ assert_is_ring_idx(ring, end);
+
+ return (end - begin + ring->desc_num) % ring->desc_num;
+}
+
+static inline int ring_space(struct hnae_ring *ring)
+{
+ return ring->desc_num -
+ ring_dist(ring, ring->next_to_clean, ring->next_to_use) - 1;
+}
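+
+/* Worked example (illustrative numbers only): with desc_num = 8,
+ * next_to_clean = 2 and next_to_use = 6, ring_dist() returns
+ * (6 - 2 + 8) % 8 = 4 descriptors in flight, and ring_space() returns
+ * 8 - 4 - 1 = 3 free slots; one slot is always kept unused so that a full
+ * ring can be told apart from an empty one.
+ */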
+
+static inline int is_ring_empty(struct hnae_ring *ring)
+{
+ assert_is_ring_idx(ring, ring->next_to_use);
+ assert_is_ring_idx(ring, ring->next_to_clean);
+
+ return ring->next_to_use == ring->next_to_clean;
+}
+
+#define hnae_buf_size(_ring) ((_ring)->buf_size)
+#define hnae_page_order(_ring) (get_order(hnae_buf_size(_ring)))
+#define hnae_page_size(_ring) (PAGE_SIZE << hnae_page_order(_ring))
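+
+/* For example (assuming 4 KiB pages): both a 2048-byte and a 4096-byte
+ * buf_size give page order 0, so hnae_page_size() is 4 KiB; buffers smaller
+ * than a page can share one page through desc_cb->page_offset.
+ */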
+
+struct hnae_handle;
+
+/* buffer operations: allocate/free and DMA map/unmap the ring buffers */
+struct hnae_buf_ops {
+ int (*alloc_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*free_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ int (*map_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+ void (*unmap_buffer)(struct hnae_ring *ring, struct hnae_desc_cb *cb);
+};
+
+struct hnae_queue {
+ u8 __iomem *io_base;
+ phys_addr_t phy_base;
+ struct hnae_ae_dev *dev; /* the device that uses this queue */
+ struct hnae_ring rx_ring ____cacheline_internodealigned_in_smp;
+ struct hnae_ring tx_ring ____cacheline_internodealigned_in_smp;
+ struct hnae_handle *handle;
+};
+
+/* hnae loopback modes */
+enum hnae_loop {
+ MAC_INTERNALLOOP_MAC = 0,
+ MAC_INTERNALLOOP_SERDES,
+ MAC_INTERNALLOOP_PHY,
+ MAC_LOOP_PHY_NONE,
+ MAC_LOOP_NONE,
+};
+
+/* hnae port type */
+enum hnae_port_type {
+ HNAE_PORT_SERVICE = 0,
+ HNAE_PORT_DEBUG
+};
+
+/* mac media type */
+enum hnae_media_type {
+ HNAE_MEDIA_TYPE_UNKNOWN = 0,
+ HNAE_MEDIA_TYPE_FIBER,
+ HNAE_MEDIA_TYPE_COPPER,
+ HNAE_MEDIA_TYPE_BACKPLANE,
+};
+
+/* This struct defines the operations on a handle.
+ *
+ * get_handle(): (mandatory)
+ * Get a handle from the AE according to its name and options.
+ * The AE driver manages the memory used by the handle and its queues, while
+ * the HNAE framework allocates the desc and desc_cb for all rings in the
+ * queues.
+ * put_handle():
+ * Release the handle.
+ * start():
+ * Enable the hardware, including all queues.
+ * stop():
+ * Disable the hardware.
+ * set_opts(): (mandatory)
+ * Set options on the AE.
+ * get_opts(): (mandatory)
+ * Get options from the AE.
+ * get_status():
+ * Get the carrier state of the handle's back channel: 1 for link up, 0 for
+ * link down.
+ * toggle_ring_irq(): (mandatory)
+ * Enable (0) or disable (1) the ring interrupt.
+ * adjust_link():
+ * Adjust the link status.
+ * set_loopback():
+ * Set the loopback mode.
+ * get_ring_bdnum_limit():
+ * Get the upper limit of BDs per ring.
+ * get_pauseparam():
+ * Get the TX and RX pause frame configuration.
+ * set_autoneg():
+ * Set pause frame autonegotiation.
+ * get_autoneg():
+ * Get pause frame autonegotiation.
+ * set_pauseparam():
+ * Set the TX and RX pause frame configuration.
+ * get_coalesce_usecs():
+ * Get the usecs to delay a TX interrupt after a packet is sent.
+ * get_max_coalesced_frames():
+ * Get the maximum number of packets to be sent before a TX interrupt.
+ * set_coalesce_usecs():
+ * Set the usecs to delay a TX interrupt after a packet is sent.
+ * set_coalesce_frames():
+ * Set the maximum number of packets to be sent before a TX interrupt.
+ * get_mac_addr():
+ * Get the MAC address.
+ * set_mac_addr():
+ * Set the MAC address.
+ * clr_mc_addr():
+ * Clear the multicast TCAM table.
+ * set_mc_addr():
+ * Add a multicast address.
+ * add_uc_addr():
+ * Add a unicast address.
+ * rm_uc_addr():
+ * Remove a unicast address.
+ * set_mtu():
+ * Set the MTU.
+ * update_stats():
+ * Update the legacy net_device statistics.
+ * get_stats():
+ * Get the ethtool statistics.
+ * get_strings():
+ * Get a set of strings that describe the requested objects.
+ * get_sset_count():
+ * Get the number of strings that get_strings() will write.
+ * update_led_status():
+ * Update the LED status.
+ * set_led_id():
+ * Set the LED id.
+ * get_regs():
+ * Get a register dump.
+ * get_regs_len():
+ * Get the length of the register dump.
+ */
+struct hnae_ae_ops {
+ struct hnae_handle *(*get_handle)(struct hnae_ae_dev *dev,
+ u32 port_id);
+ void (*put_handle)(struct hnae_handle *handle);
+ void (*init_queue)(struct hnae_queue *q);
+ void (*fini_queue)(struct hnae_queue *q);
+ int (*start)(struct hnae_handle *handle);
+ void (*stop)(struct hnae_handle *handle);
+ void (*reset)(struct hnae_handle *handle);
+ int (*set_opts)(struct hnae_handle *handle, int type, void *opts);
+ int (*get_opts)(struct hnae_handle *handle, int type, void **opts);
+ int (*get_status)(struct hnae_handle *handle);
+ int (*get_info)(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+ void (*toggle_ring_irq)(struct hnae_ring *ring, u32 val);
+ void (*adjust_link)(struct hnae_handle *handle, int speed, int duplex);
+ bool (*need_adjust_link)(struct hnae_handle *handle,
+ int speed, int duplex);
+ int (*set_loopback)(struct hnae_handle *handle,
+ enum hnae_loop loop_mode, int en);
+ void (*get_ring_bdnum_limit)(struct hnae_queue *queue,
+ u32 *uplimit);
+ void (*get_pauseparam)(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_autoneg)(struct hnae_handle *handle, u8 enable);
+ int (*get_autoneg)(struct hnae_handle *handle);
+ int (*set_pauseparam)(struct hnae_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
+ void (*get_coalesce_usecs)(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs);
+ void (*get_max_coalesced_frames)(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
+ int (*set_coalesce_usecs)(struct hnae_handle *handle, u32 timeout);
+ int (*set_coalesce_frames)(struct hnae_handle *handle,
+ u32 tx_frames, u32 rx_frames);
+ void (*get_coalesce_range)(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
+ void (*set_promisc_mode)(struct hnae_handle *handle, u32 en);
+ int (*get_mac_addr)(struct hnae_handle *handle, void **p);
+ int (*set_mac_addr)(struct hnae_handle *handle, void *p);
+ int (*add_uc_addr)(struct hnae_handle *handle,
+ const unsigned char *addr);
+ int (*rm_uc_addr)(struct hnae_handle *handle,
+ const unsigned char *addr);
+ int (*clr_mc_addr)(struct hnae_handle *handle);
+ int (*set_mc_addr)(struct hnae_handle *handle, void *addr);
+ int (*set_mtu)(struct hnae_handle *handle, int new_mtu);
+ void (*set_tso_stats)(struct hnae_handle *handle, int enable);
+ void (*update_stats)(struct hnae_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae_handle *handle, u64 *data);
+ void (*get_strings)(struct hnae_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae_handle *handle, int stringset);
+ void (*update_led_status)(struct hnae_handle *handle);
+ int (*set_led_id)(struct hnae_handle *handle,
+ enum hnae_led_state status);
+ void (*get_regs)(struct hnae_handle *handle, void *data);
+ int (*get_regs_len)(struct hnae_handle *handle);
+ u32 (*get_rss_key_size)(struct hnae_handle *handle);
+ u32 (*get_rss_indir_size)(struct hnae_handle *handle);
+ int (*get_rss)(struct hnae_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc);
+ int (*set_rss)(struct hnae_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc);
+};
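+
+/* Minimal registration sketch (illustrative only; hns_ae_adapt.c below is the
+ * real in-tree user, and the my_* names here are hypothetical):
+ *
+ *	static struct hnae_ae_ops my_ops = {
+ *		.get_handle      = my_get_handle,
+ *		.set_opts        = my_set_opts,
+ *		.get_opts        = my_get_opts,
+ *		.toggle_ring_irq = my_toggle_ring_irq,
+ *	};
+ *
+ *	ae_dev->ops = &my_ops;
+ *	ae_dev->dev = parent_dev;
+ *	ret = hnae_ae_register(ae_dev, THIS_MODULE);
+ */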
+
+struct hnae_ae_dev {
+ struct device cls_dev; /* the class dev */
+ struct device *dev; /* the presented dev */
+ struct hnae_ae_ops *ops;
+ struct list_head node;
+ struct module *owner; /* the module that provides this dev */
+ int id;
+ char name[AE_NAME_SIZE];
+ struct list_head handle_list;
+ spinlock_t lock; /* lock to protect the handle_list */
+};
+
+struct hnae_handle {
+ struct device *owner_dev; /* the device that makes use of this handle */
+ struct hnae_ae_dev *dev; /* the device that provides this handle */
+ struct phy_device *phy_dev;
+ phy_interface_t phy_if;
+ u32 if_support;
+ int q_num;
+ int vf_id;
+ unsigned long coal_last_jiffies;
+ u32 coal_param; /* self-adaptive coalesce parameter */
+ /* the index of the last ring that set the coalesce parameter */
+ u32 coal_ring_idx;
+ u32 eport_id;
+ u32 dport_id; /* v2 tx bd should fill the dport_id */
+ bool coal_adapt_en;
+ enum hnae_port_type port_type;
+ enum hnae_media_type media_type;
+ struct list_head node; /* list to hnae_ae_dev->handle_list */
+ struct hnae_buf_ops *bops; /* operation for the buffer */
+ struct hnae_queue **qs; /* array base of all queues */
+};
+
+#define ring_to_dev(ring) ((ring)->q->dev->dev)
+
+struct hnae_handle *hnae_get_handle(struct device *owner_dev,
+ const struct fwnode_handle *fwnode,
+ u32 port_id,
+ struct hnae_buf_ops *bops);
+
+void hnae_put_handle(struct hnae_handle *handle);
+int hnae_ae_register(struct hnae_ae_dev *dev, struct module *owner);
+void hnae_ae_unregister(struct hnae_ae_dev *dev);
+
+int hnae_register_notifier(struct notifier_block *nb);
+void hnae_unregister_notifier(struct notifier_block *nb);
+int hnae_reinit_handle(struct hnae_handle *handle);
+
+#define hnae_queue_xmit(q, buf_num) writel_relaxed(buf_num, \
+ (q)->tx_ring.io_base + RCB_REG_TAIL)
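+
+/* Illustrative doorbell sketch (hedged example, not copied from the enet
+ * code): after buf_num TX descriptors have been filled starting at
+ * next_to_use, the producer notifies hardware by writing buf_num to the
+ * ring tail register:
+ *
+ *	hnae_queue_xmit(ring->q, buf_num);
+ */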
+
+#ifndef assert
+#define assert(cond)
+#endif
+
+static inline int hnae_reserve_buffer_map(struct hnae_ring *ring,
+ struct hnae_desc_cb *cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ int ret;
+
+ ret = bops->alloc_buffer(ring, cb);
+ if (ret)
+ goto out;
+
+ ret = bops->map_buffer(ring, cb);
+ if (ret)
+ goto out_with_buf;
+
+ return 0;
+
+out_with_buf:
+ bops->free_buffer(ring, cb);
+out:
+ return ret;
+}
+
+static inline int hnae_alloc_buffer_attach(struct hnae_ring *ring, int i)
+{
+ int ret = hnae_reserve_buffer_map(ring, &ring->desc_cb[i]);
+
+ if (ret)
+ return ret;
+
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+
+ return 0;
+}
+
+static inline void hnae_buffer_detach(struct hnae_ring *ring, int i)
+{
+ ring->q->handle->bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc[i].addr = 0;
+}
+
+static inline void hnae_free_buffer_detach(struct hnae_ring *ring, int i)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+ struct hnae_desc_cb *cb = &ring->desc_cb[i];
+
+ if (!ring->desc_cb[i].dma)
+ return;
+
+ hnae_buffer_detach(ring, i);
+ bops->free_buffer(ring, cb);
+}
+
+/* detach an in-use buffer and replace it with a reserved one */
+static inline void hnae_replace_buffer(struct hnae_ring *ring, int i,
+ struct hnae_desc_cb *res_cb)
+{
+ struct hnae_buf_ops *bops = ring->q->handle->bops;
+
+ bops->unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc_cb[i] = *res_cb;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+static inline void hnae_reuse_buffer(struct hnae_ring *ring, int i)
+{
+ ring->desc_cb[i].reuse_flag = 0;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
+ + ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.ipoff_bnum_pid_flag = 0;
+}
+
+/* when the buffer size is reinitialized, the ring descriptors must be reinitialized as well */
+static inline void hnae_reinit_all_ring_desc(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++)
+ ring->desc[j].addr = cpu_to_le64(ring->desc_cb[j].dma);
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+/* when the buffer size is reinitialized, the page offsets must be reset as well */
+static inline void hnae_reinit_all_ring_page_off(struct hnae_handle *h)
+{
+ int i, j;
+ struct hnae_ring *ring;
+
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ for (j = 0; j < ring->desc_num; j++) {
+ ring->desc_cb[j].page_offset = 0;
+ if (ring->desc[j].addr !=
+ cpu_to_le64(ring->desc_cb[j].dma))
+ ring->desc[j].addr =
+ cpu_to_le64(ring->desc_cb[j].dma);
+ }
+ }
+
+ wmb(); /* commit all data before submit */
+}
+
+#define hnae_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= ((val) << (shift)) & (mask); \
+ } while (0)
+
+#define hnae_set_bit(origin, shift, val) \
+ hnae_set_field((origin), (0x1 << (shift)), (shift), (val))
+
+#define hnae_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae_get_bit(origin, shift) \
+ hnae_get_field((origin), (0x1 << (shift)), (shift))
+
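+/* Worked example (illustrative only; the values are arbitrary sample data and
+ * 'desc' is a hypothetical pointer to a struct hnae_desc): composing the V1
+ * TX flag word with hnae_set_field()/hnae_set_bit().
+ *
+ *	u32 flag_ipoffset = 0;
+ *
+ *	hnae_set_field(flag_ipoffset, HNS_TXD_IPOFFSET_M,
+ *		       HNS_TXD_IPOFFSET_S, ETH_HLEN);
+ *	hnae_set_bit(flag_ipoffset, HNS_TXD_L4CS_B, 1);
+ *	hnae_set_bit(flag_ipoffset, HNS_TXD_VLD_B, 1);
+ *	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+ */
+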
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
new file mode 100644
index 000000000..b98244f75
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c
@@ -0,0 +1,1025 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include "hnae.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+static struct hns_mac_cb *hns_get_mac_cb(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ return vf_cb->mac_cb;
+}
+
+static struct dsaf_device *hns_ae_get_dsaf_dev(struct hnae_ae_dev *dev)
+{
+ return container_of(dev, struct dsaf_device, ae_dev);
+}
+
+static struct hns_ppe_cb *hns_get_ppe_cb(struct hnae_handle *handle)
+{
+ int ppe_index;
+ struct ppe_common_cb *ppe_comm;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ ppe_comm = vf_cb->dsaf_dev->ppe_common[0];
+ ppe_index = vf_cb->port_index;
+
+ return &ppe_comm->ppe_cb[ppe_index];
+}
+
+static int hns_ae_get_q_num_per_vf(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ return dsaf_dev->rcb_common[0]->max_q_per_vf;
+}
+
+static int hns_ae_get_vf_num_per_port(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ return dsaf_dev->rcb_common[0]->max_vfn;
+}
+
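+/* Ring-pair indexing sketch (the numbers below are illustrative only): the
+ * common RCB keeps every ring pair in one array and each port owns
+ * max_vfn * max_q_per_vf consecutive entries, so with e.g. max_vfn = 8 and
+ * max_q_per_vf = 2, port 3 starts at index 3 * 2 * 8 = 48.
+ */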
+static struct ring_pair_cb *hns_ae_get_base_ring_pair(
+ struct dsaf_device *dsaf_dev, int port)
+{
+ struct rcb_common_cb *rcb_comm = dsaf_dev->rcb_common[0];
+ int q_num = rcb_comm->max_q_per_vf;
+ int vf_num = rcb_comm->max_vfn;
+
+ return &rcb_comm->ring_pair_cb[port * q_num * vf_num];
+}
+
+static struct ring_pair_cb *hns_ae_get_ring_pair(struct hnae_queue *q)
+{
+ return container_of(q, struct ring_pair_cb, q);
+}
+
+static struct hnae_handle *hns_ae_get_handle(struct hnae_ae_dev *dev,
+ u32 port_id)
+{
+ int vfnum_per_port;
+ int qnum_per_vf;
+ int i;
+ struct dsaf_device *dsaf_dev;
+ struct hnae_handle *ae_handle;
+ struct ring_pair_cb *ring_pair_cb;
+ struct hnae_vf_cb *vf_cb;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(dev);
+
+ ring_pair_cb = hns_ae_get_base_ring_pair(dsaf_dev, port_id);
+ vfnum_per_port = hns_ae_get_vf_num_per_port(dsaf_dev, port_id);
+ qnum_per_vf = hns_ae_get_q_num_per_vf(dsaf_dev, port_id);
+
+ vf_cb = kzalloc(sizeof(*vf_cb) +
+ qnum_per_vf * sizeof(struct hnae_queue *), GFP_KERNEL);
+ if (unlikely(!vf_cb)) {
+ dev_err(dsaf_dev->dev, "malloc vf_cb fail!\n");
+ ae_handle = ERR_PTR(-ENOMEM);
+ goto handle_err;
+ }
+ ae_handle = &vf_cb->ae_handle;
+ /* ae_handle Init */
+ ae_handle->owner_dev = dsaf_dev->dev;
+ ae_handle->dev = dev;
+ ae_handle->q_num = qnum_per_vf;
+ ae_handle->coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
+
+ /* find an unused ring pair and set the vf id */
+ for (ae_handle->vf_id = 0;
+ ae_handle->vf_id < vfnum_per_port; ae_handle->vf_id++) {
+ if (!ring_pair_cb->used_by_vf)
+ break;
+ ring_pair_cb += qnum_per_vf;
+ }
+ if (ae_handle->vf_id >= vfnum_per_port) {
+ dev_err(dsaf_dev->dev, "malloc queue fail!\n");
+ ae_handle = ERR_PTR(-EINVAL);
+ goto vf_id_err;
+ }
+
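+ /* the queue-pointer array lives in the extra space allocated after
+ * vf_cb above; this assumes ae_handle (and its trailing qs member) is
+ * the last field of vf_cb, so the array starts right past &qs.
+ */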
+ ae_handle->qs = (struct hnae_queue **)(&ae_handle->qs + 1);
+ for (i = 0; i < qnum_per_vf; i++) {
+ ae_handle->qs[i] = &ring_pair_cb->q;
+ ae_handle->qs[i]->rx_ring.q = ae_handle->qs[i];
+ ae_handle->qs[i]->tx_ring.q = ae_handle->qs[i];
+
+ ring_pair_cb->used_by_vf = 1;
+ ring_pair_cb++;
+ }
+
+ vf_cb->dsaf_dev = dsaf_dev;
+ vf_cb->port_index = port_id;
+ vf_cb->mac_cb = dsaf_dev->mac_cb[port_id];
+
+ ae_handle->phy_if = vf_cb->mac_cb->phy_if;
+ ae_handle->phy_dev = vf_cb->mac_cb->phy_dev;
+ ae_handle->if_support = vf_cb->mac_cb->if_support;
+ ae_handle->port_type = vf_cb->mac_cb->mac_type;
+ ae_handle->media_type = vf_cb->mac_cb->media_type;
+ ae_handle->dport_id = port_id;
+
+ return ae_handle;
+vf_id_err:
+ kfree(vf_cb);
+handle_err:
+ return ae_handle;
+}
+
+static void hns_ae_put_handle(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ int i;
+
+ for (i = 0; i < handle->q_num; i++)
+ hns_ae_get_ring_pair(handle->qs[i])->used_by_vf = 0;
+
+ kfree(vf_cb);
+}
+
+static int hns_ae_wait_flow_down(struct hnae_handle *handle)
+{
+ struct dsaf_device *dsaf_dev;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_vf_cb *vf_cb;
+ int ret;
+ int i;
+
+ for (i = 0; i < handle->q_num; i++) {
+ ret = hns_rcb_wait_tx_ring_clean(handle->qs[i]);
+ if (ret)
+ return ret;
+ }
+
+ ppe_cb = hns_get_ppe_cb(handle);
+ ret = hns_ppe_wait_tx_fifo_clean(ppe_cb);
+ if (ret)
+ return ret;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return -EINVAL;
+ ret = hns_dsaf_wait_pkt_clean(dsaf_dev, handle->dport_id);
+ if (ret)
+ return ret;
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ ret = hns_mac_wait_fifo_clean(vf_cb->mac_cb);
+ if (ret)
+ return ret;
+
+ mdelay(10);
+ return 0;
+}
+
+static void hns_ae_ring_enable_all(struct hnae_handle *handle, int val)
+{
+ int q_num = handle->q_num;
+ int i;
+
+ for (i = 0; i < q_num; i++)
+ hns_rcb_ring_enable_hw(handle->qs[i], val);
+}
+
+static void hns_ae_init_queue(struct hnae_queue *q)
+{
+ struct ring_pair_cb *ring =
+ container_of(q, struct ring_pair_cb, q);
+
+ hns_rcb_init_hw(ring);
+}
+
+static void hns_ae_fini_queue(struct hnae_queue *q)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(q->handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_rcb_reset_ring_hw(q);
+}
+
+static int hns_ae_set_mac_address(struct hnae_handle *handle, void *p)
+{
+ int ret;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (!p || !is_valid_ether_addr((const u8 *)p)) {
+ dev_err(handle->owner_dev, "is not valid ether addr !\n");
+ return -EADDRNOTAVAIL;
+ }
+
+ ret = hns_mac_change_vf_addr(mac_cb, handle->vf_id, p);
+ if (ret != 0) {
+ dev_err(handle->owner_dev,
+ "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_ae_add_uc_address(struct hnae_handle *handle,
+ const unsigned char *addr)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return -ENOSPC;
+
+ return hns_mac_add_uc_addr(mac_cb, handle->vf_id, addr);
+}
+
+static int hns_ae_rm_uc_address(struct hnae_handle *handle,
+ const unsigned char *addr)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return -ENOSPC;
+
+ return hns_mac_rm_uc_addr(mac_cb, handle->vf_id, addr);
+}
+
+static int hns_ae_set_multicast_one(struct hnae_handle *handle, void *addr)
+{
+ int ret;
+ char *mac_addr = (char *)addr;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ u8 port_num;
+
+ assert(mac_cb);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ ret = hns_mac_set_multi(mac_cb, mac_cb->mac_id, mac_addr, true);
+ if (ret) {
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, mac_cb->mac_id, ret);
+ return ret;
+ }
+
+ ret = hns_mac_get_inner_port_num(mac_cb, handle->vf_id, &port_num);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_set_multi(mac_cb, port_num, mac_addr, true);
+ if (ret)
+ dev_err(handle->owner_dev,
+ "mac add mul_mac:%pM port%d fail, ret = %#x!\n",
+ mac_addr, DSAF_BASE_INNER_PORT_NUM, ret);
+
+ return ret;
+}
+
+static int hns_ae_clr_multicast(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return 0;
+
+ return hns_mac_clr_multicast(mac_cb, handle->vf_id);
+}
+
+static int hns_ae_set_mtu(struct hnae_handle *handle, int new_mtu)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct hnae_queue *q;
+ u32 rx_buf_size;
+ int i, ret;
+
+ /* with a 2048-byte buf_size the max MTU is 6K, since an rx packet
+ * spans at most 3 BDs.
+ */
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+ if (new_mtu <= BD_SIZE_2048_MAX_MTU)
+ rx_buf_size = 2048;
+ else
+ rx_buf_size = 4096;
+ } else {
+ rx_buf_size = mac_cb->dsaf_dev->buf_size;
+ }
+
+ ret = hns_mac_set_mtu(mac_cb, new_mtu, rx_buf_size);
+
+ if (!ret) {
+ /* reinit ring buf_size */
+ for (i = 0; i < handle->q_num; i++) {
+ q = handle->qs[i];
+ q->rx_ring.buf_size = rx_buf_size;
+ hns_rcb_set_rx_ring_bs(q, rx_buf_size);
+ }
+ }
+
+ return ret;
+}
+
+static void hns_ae_set_tso_stats(struct hnae_handle *handle, int enable)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_set_tso_enable(ppe_cb, enable);
+}
+
+static int hns_ae_start(struct hnae_handle *handle)
+{
+ int ret;
+ int k;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ ret = hns_mac_vm_config_bc_en(mac_cb, 0, true);
+ if (ret)
+ return ret;
+
+ for (k = 0; k < handle->q_num; k++) {
+ if (AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver))
+ hns_rcb_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ else
+ hns_rcbv2_int_clr_hw(handle->qs[k],
+ RCB_INT_FLAG_TX | RCB_INT_FLAG_RX);
+ }
+ hns_ae_ring_enable_all(handle, 1);
+ msleep(100);
+
+ hns_mac_start(mac_cb);
+
+ return 0;
+}
+
+static void hns_ae_stop(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ /* only the tx fbds need to drain here; rx fbds are cleaned below */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_TX);
+
+ msleep(20);
+
+ hns_mac_stop(mac_cb);
+
+ usleep_range(10000, 20000);
+
+ hns_ae_ring_enable_all(handle, 0);
+
+ /* clean rx fbd. */
+ hns_rcb_wait_fbd_clean(handle->qs, handle->q_num, RCB_INT_FLAG_RX);
+
+ (void)hns_mac_vm_config_bc_en(mac_cb, 0, false);
+}
+
+static void hns_ae_reset(struct hnae_handle *handle)
+{
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ hns_mac_reset(vf_cb->mac_cb);
+ hns_ppe_reset_common(vf_cb->dsaf_dev, 0);
+ }
+}
+
+static void hns_ae_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+ u32 flag;
+
+ if (is_tx_ring(ring))
+ flag = RCB_INT_FLAG_TX;
+ else
+ flag = RCB_INT_FLAG_RX;
+
+ hns_rcb_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static void hns_aev2_toggle_ring_irq(struct hnae_ring *ring, u32 mask)
+{
+ u32 flag;
+
+ if (is_tx_ring(ring))
+ flag = RCB_INT_FLAG_TX;
+ else
+ flag = RCB_INT_FLAG_RX;
+
+ hns_rcbv2_int_ctrl_hw(ring->q, flag, mask);
+}
+
+static int hns_ae_get_link_status(struct hnae_handle *handle)
+{
+ u32 link_status;
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_mac_get_link_status(mac_cb, &link_status);
+
+ return !!link_status;
+}
+
+static int hns_ae_get_mac_info(struct hnae_handle *handle,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_get_port_info(mac_cb, auto_neg, speed, duplex);
+}
+
+static bool hns_ae_need_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ return hns_mac_need_adjust_link(mac_cb, speed, duplex);
+}
+
+static void hns_ae_adjust_link(struct hnae_handle *handle, int speed,
+ int duplex)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ switch (mac_cb->dsaf_dev->dsaf_ver) {
+ case AE_VERSION_1:
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+ break;
+
+ case AE_VERSION_2:
+ /* the chip needs to drain all packets in flight first */
+ hns_mac_disable(mac_cb, MAC_COMM_MODE_RX);
+ if (hns_ae_wait_flow_down(handle)) {
+ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+ break;
+ }
+
+ hns_mac_adjust_link(mac_cb, speed, duplex);
+ hns_mac_enable(mac_cb, MAC_COMM_MODE_RX);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void hns_ae_get_ring_bdnum_limit(struct hnae_queue *queue,
+ u32 *uplimit)
+{
+ *uplimit = HNS_RCB_RING_MAX_PENDING_BD;
+}
+
+static void hns_ae_get_pauseparam(struct hnae_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+
+ hns_mac_get_autoneg(mac_cb, auto_neg);
+
+ hns_mac_get_pauseparam(mac_cb, rx_en, tx_en);
+
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_rx_mac_pause_en(dsaf_dev, mac_cb->mac_id, rx_en);
+}
+
+static int hns_ae_set_autoneg(struct hnae_handle *handle, u8 enable)
+{
+ assert(handle);
+
+ return hns_mac_set_autoneg(hns_get_mac_cb(handle), enable);
+}
+
+static void hns_ae_set_promisc_mode(struct hnae_handle *handle, u32 en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+
+ hns_dsaf_set_promisc_mode(hns_ae_get_dsaf_dev(handle->dev), en);
+ hns_mac_set_promisc(mac_cb, (u8)!!en);
+}
+
+static int hns_ae_get_autoneg(struct hnae_handle *handle)
+{
+ u32 auto_neg;
+
+ assert(handle);
+
+ hns_mac_get_autoneg(hns_get_mac_cb(handle), &auto_neg);
+
+ return auto_neg;
+}
+
+static int hns_ae_set_pauseparam(struct hnae_handle *handle,
+ u32 autoneg, u32 rx_en, u32 tx_en)
+{
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ int ret;
+
+ ret = hns_mac_set_autoneg(mac_cb, autoneg);
+ if (ret)
+ return ret;
+
+ /* Service port's pause feature is provided by DSAF, not mac */
+ if (handle->port_type == HNAE_PORT_SERVICE) {
+ ret = hns_dsaf_set_rx_mac_pause_en(dsaf_dev,
+ mac_cb->mac_id, rx_en);
+ if (ret)
+ return ret;
+ rx_en = 0;
+ }
+ return hns_mac_set_pauseparam(mac_cb, rx_en, tx_en);
+}
+
+static void hns_ae_get_coalesce_usecs(struct hnae_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs)
+{
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+
+ *tx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+ *rx_usecs = hns_rcb_get_coalesce_usecs(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+}
+
+static void hns_ae_get_max_coalesced_frames(struct hnae_handle *handle,
+ u32 *tx_frames, u32 *rx_frames)
+{
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames = hns_rcb_get_rx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ else
+ *tx_frames = hns_rcb_get_tx_coalesced_frames(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm);
+ *rx_frames = hns_rcb_get_rx_coalesced_frames(ring_pair->rcb_common,
+ ring_pair->port_id_in_comm);
+}
+
+static int hns_ae_set_coalesce_usecs(struct hnae_handle *handle,
+ u32 timeout)
+{
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+
+ return hns_rcb_set_coalesce_usecs(
+ ring_pair->rcb_common, ring_pair->port_id_in_comm, timeout);
+}
+
+static int hns_ae_set_coalesce_frames(struct hnae_handle *handle,
+ u32 tx_frames, u32 rx_frames)
+{
+ int ret;
+ struct ring_pair_cb *ring_pair =
+ container_of(handle->qs[0], struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG) {
+ if (tx_frames != rx_frames)
+ return -EINVAL;
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ } else {
+ if (tx_frames != 1)
+ return -EINVAL;
+ ret = hns_rcb_set_tx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, tx_frames);
+ if (ret)
+ return ret;
+
+ return hns_rcb_set_rx_coalesced_frames(
+ ring_pair->rcb_common,
+ ring_pair->port_id_in_comm, rx_frames);
+ }
+}
+
+static void hns_ae_get_coalesce_range(struct hnae_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high)
+{
+ struct dsaf_device *dsaf_dev;
+
+ assert(handle);
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ *tx_frames_low = HNS_RCB_TX_FRAMES_LOW;
+ *rx_frames_low = HNS_RCB_RX_FRAMES_LOW;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) ||
+ handle->port_type == HNAE_PORT_DEBUG)
+ *tx_frames_high =
+ (dsaf_dev->desc_num - 1 > HNS_RCB_TX_FRAMES_HIGH) ?
+ HNS_RCB_TX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ else
+ *tx_frames_high = 1;
+
+ *rx_frames_high = (dsaf_dev->desc_num - 1 > HNS_RCB_RX_FRAMES_HIGH) ?
+ HNS_RCB_RX_FRAMES_HIGH : dsaf_dev->desc_num - 1;
+ *tx_usecs_low = HNS_RCB_TX_USECS_LOW;
+ *rx_usecs_low = HNS_RCB_RX_USECS_LOW;
+ *tx_usecs_high = HNS_RCB_TX_USECS_HIGH;
+ *rx_usecs_high = HNS_RCB_RX_USECS_HIGH;
+}
+
+static void hns_ae_update_stats(struct hnae_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ int port;
+ int idx;
+ struct dsaf_device *dsaf_dev;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ struct hnae_queue *queue;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ u64 tx_bytes = 0, rx_bytes = 0, tx_packets = 0, rx_packets = 0;
+ u64 rx_errors = 0, tx_errors = 0, tx_dropped = 0;
+ u64 rx_missed_errors = 0;
+
+ dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ if (!dsaf_dev)
+ return;
+ port = vf_cb->port_index;
+ ppe_cb = hns_get_ppe_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ queue = handle->qs[idx];
+ hns_rcb_update_stats(queue);
+
+ tx_bytes += queue->tx_ring.stats.tx_bytes;
+ tx_packets += queue->tx_ring.stats.tx_pkts;
+ rx_bytes += queue->rx_ring.stats.rx_bytes;
+ rx_packets += queue->rx_ring.stats.rx_pkts;
+
+ rx_errors += queue->rx_ring.stats.err_pkt_len
+ + queue->rx_ring.stats.l2_err
+ + queue->rx_ring.stats.l3l4_csum_err;
+ }
+
+ hns_ppe_update_stats(ppe_cb);
+ rx_missed_errors = ppe_cb->hw_stats.rx_drop_no_buf;
+ tx_errors += ppe_cb->hw_stats.tx_err_checksum
+ + ppe_cb->hw_stats.tx_err_fifo_empty;
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE) {
+ hns_dsaf_update_stats(dsaf_dev, port);
+ /* for port upline direction, i.e., rx. */
+ rx_missed_errors += dsaf_dev->hw_stats[port].bp_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].pad_drop;
+ rx_missed_errors += dsaf_dev->hw_stats[port].crc_false;
+
+ /* for port downline direction, i.e., tx. */
+ port = port + DSAF_PPE_INODE_BASE;
+ hns_dsaf_update_stats(dsaf_dev, port);
+ tx_dropped += dsaf_dev->hw_stats[port].bp_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].pad_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].crc_false;
+ tx_dropped += dsaf_dev->hw_stats[port].rslt_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].vlan_drop;
+ tx_dropped += dsaf_dev->hw_stats[port].stp_drop;
+ }
+
+ hns_mac_update_stats(mac_cb);
+ rx_errors += mac_cb->hw_stats.rx_fifo_overrun_err;
+
+ tx_errors += mac_cb->hw_stats.tx_bad_pkts
+ + mac_cb->hw_stats.tx_fragment_err
+ + mac_cb->hw_stats.tx_jabber_err
+ + mac_cb->hw_stats.tx_underrun_err
+ + mac_cb->hw_stats.tx_crc_err;
+
+ net_stats->tx_bytes = tx_bytes;
+ net_stats->tx_packets = tx_packets;
+ net_stats->rx_bytes = rx_bytes;
+ net_stats->rx_dropped = 0;
+ net_stats->rx_packets = rx_packets;
+ net_stats->rx_errors = rx_errors;
+ net_stats->tx_errors = tx_errors;
+ net_stats->tx_dropped = tx_dropped;
+ net_stats->rx_missed_errors = rx_missed_errors;
+ net_stats->rx_crc_errors = mac_cb->hw_stats.rx_fcs_err;
+ net_stats->rx_frame_errors = mac_cb->hw_stats.rx_align_err;
+ net_stats->rx_fifo_errors = mac_cb->hw_stats.rx_fifo_overrun_err;
+ net_stats->rx_length_errors = mac_cb->hw_stats.rx_len_err;
+ net_stats->multicast = mac_cb->hw_stats.rx_mc_pkts;
+}
+
+static void hns_ae_get_stats(struct hnae_handle *handle, u64 *data)
+{
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ u64 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ if (!handle || !data) {
+ pr_err("hns_ae_get_stats NULL handle or data pointer!\n");
+ return;
+ }
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_stats(handle->qs[idx], p);
+ p += hns_rcb_get_ring_sset_count((int)ETH_SS_STATS);
+ }
+
+ hns_ppe_get_stats(ppe_cb, p);
+ p += hns_ppe_get_sset_count((int)ETH_SS_STATS);
+
+ hns_mac_get_stats(mac_cb, p);
+ p += hns_mac_get_sset_count(mac_cb, (int)ETH_SS_STATS);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_stats(vf_cb->dsaf_dev, p, vf_cb->port_index);
+}
+
+static void hns_ae_get_strings(struct hnae_handle *handle,
+ u32 stringset, u8 *data)
+{
+ int port;
+ int idx;
+ struct hns_mac_cb *mac_cb;
+ struct hns_ppe_cb *ppe_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+ u8 *p = data;
+ struct hnae_vf_cb *vf_cb;
+
+ assert(handle);
+
+ vf_cb = hns_ae_get_vf_cb(handle);
+ port = vf_cb->port_index;
+ mac_cb = hns_get_mac_cb(handle);
+ ppe_cb = hns_get_ppe_cb(handle);
+
+ for (idx = 0; idx < handle->q_num; idx++) {
+ hns_rcb_get_strings(stringset, p, idx);
+ p += ETH_GSTRING_LEN * hns_rcb_get_ring_sset_count(stringset);
+ }
+
+ hns_ppe_get_strings(ppe_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_ppe_get_sset_count(stringset);
+
+ hns_mac_get_strings(mac_cb, stringset, p);
+ p += ETH_GSTRING_LEN * hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_strings(stringset, p, port, dsaf_dev);
+}
+
+static int hns_ae_get_sset_count(struct hnae_handle *handle, int stringset)
+{
+ u32 sset_count = 0;
+ struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev = hns_ae_get_dsaf_dev(handle->dev);
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ sset_count += hns_rcb_get_ring_sset_count(stringset) * handle->q_num;
+ sset_count += hns_ppe_get_sset_count(stringset);
+ sset_count += hns_mac_get_sset_count(mac_cb, stringset);
+
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ sset_count += hns_dsaf_get_sset_count(dsaf_dev, stringset);
+
+ return sset_count;
+}
+
+static int hns_ae_config_loopback(struct hnae_handle *handle,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_mac_cb *mac_cb = hns_get_mac_cb(handle);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ ret = 0;
+ break;
+ case MAC_INTERNALLOOP_SERDES:
+ ret = dsaf_dev->misc_op->cfg_serdes_loopback(vf_cb->mac_cb,
+ !!en);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ ret = hns_mac_config_mac_loopback(vf_cb->mac_cb, loop, en);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static void hns_ae_update_led_status(struct hnae_handle *handle)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+ mac_cb = hns_get_mac_cb(handle);
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
+
+ hns_set_led_opt(mac_cb);
+}
+
+static int hns_ae_cpld_set_led_id(struct hnae_handle *handle,
+ enum hnae_led_state status)
+{
+ struct hns_mac_cb *mac_cb;
+
+ assert(handle);
+
+ mac_cb = hns_get_mac_cb(handle);
+
+ return hns_cpld_led_set_id(mac_cb, status);
+}
+
+static void hns_ae_get_regs(struct hnae_handle *handle, void *data)
+{
+ u32 *p = data;
+ int i;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ hns_ppe_get_regs(ppe_cb, p);
+ p += hns_ppe_get_regs_count();
+
+ hns_rcb_get_common_regs(vf_cb->dsaf_dev->rcb_common[0], p);
+ p += hns_rcb_get_common_regs_count();
+
+ for (i = 0; i < handle->q_num; i++) {
+ hns_rcb_get_ring_regs(handle->qs[i], p);
+ p += hns_rcb_get_ring_regs_count();
+ }
+
+ hns_mac_get_regs(vf_cb->mac_cb, p);
+ p += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ hns_dsaf_get_regs(vf_cb->dsaf_dev, vf_cb->port_index, p);
+}
+
+static int hns_ae_get_regs_len(struct hnae_handle *handle)
+{
+ u32 total_num;
+ struct hnae_vf_cb *vf_cb = hns_ae_get_vf_cb(handle);
+
+ total_num = hns_ppe_get_regs_count();
+ total_num += hns_rcb_get_common_regs_count();
+ total_num += hns_rcb_get_ring_regs_count() * handle->q_num;
+ total_num += hns_mac_get_regs_count(vf_cb->mac_cb);
+
+ if (vf_cb->mac_cb->mac_type == HNAE_PORT_SERVICE)
+ total_num += hns_dsaf_get_regs_count();
+
+ return total_num;
+}
+
+static u32 hns_ae_get_rss_key_size(struct hnae_handle *handle)
+{
+ return HNS_PPEV2_RSS_KEY_SIZE;
+}
+
+static u32 hns_ae_get_rss_indir_size(struct hnae_handle *handle)
+{
+ return HNS_PPEV2_RSS_IND_TBL_SIZE;
+}
+
+static int hns_ae_get_rss(struct hnae_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ /* currently we support only one type of hash function, i.e. Toeplitz hash */
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ /* get the RSS key requested by the user */
+ if (key)
+ memcpy(key, ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
+
+ /* update the current hash->queue mappings from the shadow RSS table */
+ if (indir)
+ memcpy(indir, ppe_cb->rss_indir_table,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+
+ return 0;
+}
+
+static int hns_ae_set_rss(struct hnae_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hns_ppe_cb *ppe_cb = hns_get_ppe_cb(handle);
+
+ /* set the RSS hash key if specified by the user */
+ if (key) {
+ memcpy(ppe_cb->rss_key, key, HNS_PPEV2_RSS_KEY_SIZE);
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+ }
+
+ if (indir) {
+ /* update the shadow RSS table with user specified qids */
+ memcpy(ppe_cb->rss_indir_table, indir,
+ HNS_PPEV2_RSS_IND_TBL_SIZE * sizeof(*indir));
+
+ /* now update the hardware */
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
+
+ return 0;
+}
+
+static struct hnae_ae_ops hns_dsaf_ops = {
+ .get_handle = hns_ae_get_handle,
+ .put_handle = hns_ae_put_handle,
+ .init_queue = hns_ae_init_queue,
+ .fini_queue = hns_ae_fini_queue,
+ .start = hns_ae_start,
+ .stop = hns_ae_stop,
+ .reset = hns_ae_reset,
+ .toggle_ring_irq = hns_ae_toggle_ring_irq,
+ .get_status = hns_ae_get_link_status,
+ .get_info = hns_ae_get_mac_info,
+ .adjust_link = hns_ae_adjust_link,
+ .need_adjust_link = hns_ae_need_adjust_link,
+ .set_loopback = hns_ae_config_loopback,
+ .get_ring_bdnum_limit = hns_ae_get_ring_bdnum_limit,
+ .get_pauseparam = hns_ae_get_pauseparam,
+ .set_autoneg = hns_ae_set_autoneg,
+ .get_autoneg = hns_ae_get_autoneg,
+ .set_pauseparam = hns_ae_set_pauseparam,
+ .get_coalesce_usecs = hns_ae_get_coalesce_usecs,
+ .get_max_coalesced_frames = hns_ae_get_max_coalesced_frames,
+ .set_coalesce_usecs = hns_ae_set_coalesce_usecs,
+ .set_coalesce_frames = hns_ae_set_coalesce_frames,
+ .get_coalesce_range = hns_ae_get_coalesce_range,
+ .set_promisc_mode = hns_ae_set_promisc_mode,
+ .set_mac_addr = hns_ae_set_mac_address,
+ .add_uc_addr = hns_ae_add_uc_address,
+ .rm_uc_addr = hns_ae_rm_uc_address,
+ .set_mc_addr = hns_ae_set_multicast_one,
+ .clr_mc_addr = hns_ae_clr_multicast,
+ .set_mtu = hns_ae_set_mtu,
+ .update_stats = hns_ae_update_stats,
+ .set_tso_stats = hns_ae_set_tso_stats,
+ .get_stats = hns_ae_get_stats,
+ .get_strings = hns_ae_get_strings,
+ .get_sset_count = hns_ae_get_sset_count,
+ .update_led_status = hns_ae_update_led_status,
+ .set_led_id = hns_ae_cpld_set_led_id,
+ .get_regs = hns_ae_get_regs,
+ .get_regs_len = hns_ae_get_regs_len,
+ .get_rss_key_size = hns_ae_get_rss_key_size,
+ .get_rss_indir_size = hns_ae_get_rss_indir_size,
+ .get_rss = hns_ae_get_rss,
+ .set_rss = hns_ae_set_rss
+};
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev)
+{
+ struct hnae_ae_dev *ae_dev = &dsaf_dev->ae_dev;
+ static atomic_t id = ATOMIC_INIT(-1);
+
+ switch (dsaf_dev->dsaf_ver) {
+ case AE_VERSION_1:
+ hns_dsaf_ops.toggle_ring_irq = hns_ae_toggle_ring_irq;
+ break;
+ case AE_VERSION_2:
+ hns_dsaf_ops.toggle_ring_irq = hns_aev2_toggle_ring_irq;
+ break;
+ default:
+ break;
+ }
+
+ snprintf(ae_dev->name, AE_NAME_SIZE, "%s%d", DSAF_DEVICE_NAME,
+ (int)atomic_inc_return(&id));
+ ae_dev->ops = &hns_dsaf_ops;
+ ae_dev->dev = dsaf_dev->dev;
+
+ return hnae_ae_register(ae_dev, THIS_MODULE);
+}
+
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev)
+{
+ hnae_ae_unregister(&dsaf_dev->ae_dev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
new file mode 100644
index 000000000..7fb7a4196
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -0,0 +1,760 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/delay.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_gmac.h"
+
+static const struct mac_stats_string g_gmac_stats_string[] = {
+ {"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
+ {"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"gmac_rx_pkts_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"gmac_rx_pkts_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"gmac_rx_pkts_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"gmac_rx_pkts_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"gmac_rx_pkts_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"gmac_rx_fcs_errors", MAC_STATS_FIELD_OFF(rx_fcs_err)},
+ {"gmac_rx_tagged", MAC_STATS_FIELD_OFF(rx_vlan_pkts)},
+ {"gmac_rx_data_err", MAC_STATS_FIELD_OFF(rx_data_err)},
+ {"gmac_rx_align_errors", MAC_STATS_FIELD_OFF(rx_align_err)},
+ {"gmac_rx_long_errors", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"gmac_rx_jabber_errors", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"gmac_rx_pause_maccontrol", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"gmac_rx_unknown_maccontrol", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"gmac_rx_very_long_err", MAC_STATS_FIELD_OFF(rx_long_err)},
+ {"gmac_rx_runt_err", MAC_STATS_FIELD_OFF(rx_minto64)},
+ {"gmac_rx_short_err", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"gmac_rx_filt_pkt", MAC_STATS_FIELD_OFF(rx_filter_pkts)},
+ {"gmac_rx_octets_total_filt", MAC_STATS_FIELD_OFF(rx_filter_bytes)},
+ {"gmac_rx_overrun_cnt", MAC_STATS_FIELD_OFF(rx_fifo_overrun_err)},
+ {"gmac_rx_length_err", MAC_STATS_FIELD_OFF(rx_len_err)},
+ {"gmac_rx_fail_comma", MAC_STATS_FIELD_OFF(rx_comma_err)},
+
+ {"gmac_tx_octets_ok", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"gmac_tx_octets_bad", MAC_STATS_FIELD_OFF(tx_bad_bytes)},
+ {"gmac_tx_uc_pkts", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"gmac_tx_mc_pkts", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"gmac_tx_bc_pkts", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"gmac_tx_pkts_64octets", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"gmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"gmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"gmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"gmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"gmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"gmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"gmac_tx_excessive_length_drop", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"gmac_tx_underrun", MAC_STATS_FIELD_OFF(tx_underrun_err)},
+ {"gmac_tx_tagged", MAC_STATS_FIELD_OFF(tx_vlan)},
+ {"gmac_tx_crc_error", MAC_STATS_FIELD_OFF(tx_crc_err)},
+ {"gmac_tx_pause_frames", MAC_STATS_FIELD_OFF(tx_pfc_tc0)}
+};
+
+static void hns_gmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* enable GE RX/TX */
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 1);
+
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* enable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 0);
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 1);
+ }
+}
+
+static void hns_gmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* disable GE RX/TX */
+ if (mode == MAC_COMM_MODE_TX || mode == MAC_COMM_MODE_RX_AND_TX)
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_TX_EN_B, 0);
+
+ if (mode == MAC_COMM_MODE_RX || mode == MAC_COMM_MODE_RX_AND_TX) {
+ /* disable rx pcs */
+ dsaf_set_dev_bit(drv, GMAC_PCS_RX_EN_REG, 0, 1);
+ dsaf_set_dev_bit(drv, GMAC_PORT_EN_REG, GMAC_PORT_RX_EN_B, 0);
+ }
+}
+
+/* hns_gmac_get_en - get the port enable state
+ * @mac_drv: mac device
+ * @rx: rx enable state
+ * @tx: tx enable state
+ */
+static void hns_gmac_get_en(void *mac_drv, u32 *rx, u32 *tx)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 porten;
+
+ porten = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ *tx = dsaf_get_bit(porten, GMAC_PORT_TX_EN_B);
+ *rx = dsaf_get_bit(porten, GMAC_PORT_RX_EN_B);
+}
+
+static void hns_gmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, mac_id, 0);
+}
+
+static void hns_gmac_set_tx_auto_pause_frames(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_FC_TX_TIMER_REG, GMAC_FC_TX_TIMER_M,
+ GMAC_FC_TX_TIMER_S, newval);
+}
+
+static void hns_gmac_get_tx_auto_pause_frames(void *mac_drv, u16 *newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *newval = dsaf_get_dev_field(drv, GMAC_FC_TX_TIMER_REG,
+ GMAC_FC_TX_TIMER_M, GMAC_FC_TX_TIMER_S);
+}
+
+static void hns_gmac_set_rx_auto_pause_frames(void *mac_drv, u32 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_PAUSE_EN_REG,
+ GMAC_PAUSE_EN_RX_FDFC_B, !!newval);
+}
+
+static void hns_gmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_field(drv, GMAC_MAX_FRM_SIZE_REG, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+
+ dsaf_set_dev_field(drv, GAMC_RX_MAX_FRAME, GMAC_MAX_FRM_SIZE_M,
+ GMAC_MAX_FRM_SIZE_S, newval);
+}
+
+static void hns_gmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ u32 tx_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_PAD_EN_B, !!newval);
+ dsaf_set_bit(tx_ctrl, GMAC_TX_CRC_ADD_B, !!newval);
+ dsaf_write_dev(drv, GMAC_TRANSMIT_CONTROL_REG, tx_ctrl);
+}
+
+static void hns_gmac_config_an_mode(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B, !!newval);
+}
+
+static void hns_gmac_tx_loop_pkt_dis(void *mac_drv)
+{
+ u32 tx_loop_pkt_pri;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ tx_loop_pkt_pri = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_EN_B, 1);
+ dsaf_set_bit(tx_loop_pkt_pri, GMAC_TX_LOOP_PKT_HIG_PRI_B, 0);
+ dsaf_write_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG, tx_loop_pkt_pri);
+}
+
+static void hns_gmac_set_duplex_type(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!newval);
+}
+
+static void hns_gmac_get_duplex_type(void *mac_drv,
+ enum hns_gmac_duplex_mdoe *duplex_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *duplex_mode = (enum hns_gmac_duplex_mdoe)dsaf_get_dev_bit(
+ drv, GMAC_DUPLEX_TYPE_REG, GMAC_DUPLEX_TYPE_B);
+}
+
+static void hns_gmac_get_port_mode(void *mac_drv, enum hns_port_mode *port_mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+}
+
+static void hns_gmac_port_mode_get(void *mac_drv,
+ struct hns_gmac_port_mode_cfg *port_mode)
+{
+ u32 tx_ctrl;
+ u32 recv_ctrl;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ port_mode->port_mode = (enum hns_port_mode)dsaf_get_dev_field(
+ drv, GMAC_PORT_MODE_REG, GMAC_PORT_MODE_M, GMAC_PORT_MODE_S);
+
+ tx_ctrl = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ recv_ctrl = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+
+ port_mode->max_frm_size =
+ dsaf_get_dev_field(drv, GMAC_MAX_FRM_SIZE_REG,
+ GMAC_MAX_FRM_SIZE_M, GMAC_MAX_FRM_SIZE_S);
+ port_mode->short_runts_thr =
+ dsaf_get_dev_field(drv, GMAC_SHORT_RUNTS_THR_REG,
+ GMAC_SHORT_RUNTS_THR_M,
+ GMAC_SHORT_RUNTS_THR_S);
+
+ port_mode->pad_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_PAD_EN_B);
+ port_mode->crc_add = dsaf_get_bit(tx_ctrl, GMAC_TX_CRC_ADD_B);
+ port_mode->an_enable = dsaf_get_bit(tx_ctrl, GMAC_TX_AN_EN_B);
+
+ port_mode->runt_pkt_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_RUNT_PKT_EN_B);
+ port_mode->strip_pad_en =
+ dsaf_get_bit(recv_ctrl, GMAC_RECV_CTRL_STRIP_PAD_EN_B);
+}
+
+static void hns_gmac_pause_frm_cfg(void *mac_drv, u32 rx_pause_en,
+ u32 tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B, !!rx_pause_en);
+ dsaf_set_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B, !!tx_pause_en);
+ dsaf_write_dev(drv, GMAC_PAUSE_EN_REG, pause_en);
+}
+
+static void hns_gmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_pause_en,
+ u32 *tx_pause_en)
+{
+ u32 pause_en;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ pause_en = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+
+ *rx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_RX_FDFC_B);
+ *tx_pause_en = dsaf_get_bit(pause_en, GMAC_PAUSE_EN_TX_FDFC_B);
+}
+
+static bool hns_gmac_need_adjust_link(void *mac_drv, enum mac_speed speed,
+ int duplex)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct hns_mac_cb *mac_cb = drv->mac_cb;
+
+ return (mac_cb->speed != speed) ||
+ (mac_cb->half_duplex == duplex);
+}
+
+static int hns_gmac_adjust_link(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_DUPLEX_TYPE_REG,
+ GMAC_DUPLEX_TYPE_B, !!full_duplex);
+
+ switch (speed) {
+ case MAC_SPEED_10:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x6);
+ break;
+ case MAC_SPEED_100:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x7);
+ break;
+ case MAC_SPEED_1000:
+ dsaf_set_dev_field(
+ drv, GMAC_PORT_MODE_REG,
+ GMAC_PORT_MODE_M, GMAC_PORT_MODE_S, 0x8);
+ break;
+ default:
+ dev_err(drv->dev,
+ "hns_gmac_adjust_link fail, speed%d mac%d\n",
+ speed, drv->mac_id);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_set_uc_match(void *mac_drv, u16 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ dsaf_set_dev_bit(drv, GMAC_REC_FILT_CONTROL_REG,
+ GMAC_UC_MATCH_EN_B, !en);
+ dsaf_set_dev_bit(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ GMAC_ADDR_EN_B, !en);
+}
+
+static void hns_gmac_set_promisc(void *mac_drv, u8 en)
+{
+ struct mac_driver *drv = mac_drv;
+
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, en);
+}
+
+static int hns_gmac_wait_fifo_clean(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ int wait_cnt;
+ u32 val;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(drv, GMAC_FIFO_STATE_REG);
+ /* bits [5:0] count packets whose transmission is not yet complete */
+ if ((val & 0x3f) == 0)
+ break;
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(drv->dev,
+ "hns ge %d fifo was not idle.\n", drv->mac_id);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_init(void *mac_drv)
+{
+ u32 port;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ port = drv->mac_id;
+
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 0);
+ mdelay(10);
+ dsaf_dev->misc_op->ge_srst(dsaf_dev, port, 1);
+ mdelay(10);
+ hns_gmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+ hns_gmac_tx_loop_pkt_dis(mac_drv);
+ if (drv->mac_cb->mac_type == HNAE_PORT_DEBUG)
+ hns_gmac_set_uc_match(mac_drv, 0);
+
+ hns_gmac_config_pad_and_crc(mac_drv, 1);
+
+ dsaf_set_dev_bit(drv, GMAC_MODE_CHANGE_EN_REG,
+ GMAC_MODE_CHANGE_EB_B, 1);
+
+ /* reduce the gmac tx water line to avoid a gmac hang-up
+ * at 100M half duplex.
+ */
+ dsaf_set_dev_field(drv, GMAC_TX_WATER_LINE_REG, GMAC_TX_WATER_LINE_MASK,
+ GMAC_TX_WATER_LINE_SHIFT, 8);
+}
+
+static void hns_gmac_update_stats(void *mac_drv)
+{
+ struct mac_hw_stats *hw_stats = NULL;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ /* RX */
+ hw_stats->rx_good_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ hw_stats->rx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ hw_stats->rx_uc_pkts += dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ hw_stats->rx_mc_pkts += dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ hw_stats->rx_bc_pkts += dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ hw_stats->rx_64bytes
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ hw_stats->rx_65to127
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ hw_stats->rx_128to255
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ hw_stats->rx_256to511
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ hw_stats->rx_512to1023
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->rx_1024to1518
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->rx_1519tomax
+ += dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->rx_fcs_err += dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ hw_stats->rx_vlan_pkts += dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ hw_stats->rx_data_err += dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ hw_stats->rx_align_err
+ += dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ hw_stats->rx_oversize
+ += dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ hw_stats->rx_jabber_err
+ += dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ hw_stats->rx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ hw_stats->rx_unknown_ctrl
+ += dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ hw_stats->rx_long_err
+ += dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ hw_stats->rx_minto64
+ += dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ hw_stats->rx_under_min
+ += dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ hw_stats->rx_filter_pkts
+ += dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ hw_stats->rx_filter_bytes
+ += dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+ hw_stats->rx_fifo_overrun_err
+ += dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ hw_stats->rx_len_err
+ += dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ hw_stats->rx_comma_err
+ += dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ /* TX */
+ hw_stats->tx_good_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ hw_stats->tx_bad_bytes
+ += dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ hw_stats->tx_uc_pkts += dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ hw_stats->tx_mc_pkts += dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ hw_stats->tx_bc_pkts += dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ hw_stats->tx_64bytes
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ hw_stats->tx_65to127
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ hw_stats->tx_128to255
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ hw_stats->tx_256to511
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ hw_stats->tx_512to1023
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ hw_stats->tx_1024to1518
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ hw_stats->tx_1519tomax
+ += dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ hw_stats->tx_jabber_err
+ += dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ hw_stats->tx_underrun_err
+ += dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ hw_stats->tx_vlan += dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ hw_stats->tx_crc_err += dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ hw_stats->tx_pfc_tc0
+ += dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+}
+
+static void hns_gmac_set_mac_addr(void *mac_drv, char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+
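+	/* bytes 0-1 go into the high register and bytes 2-5 into the low
+	 * register; the current station-address-enable bit is preserved
+	 * when the high register is rewritten.
+	 */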
+ u32 val = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ u32 sta_addr_en = dsaf_get_bit(val, GMAC_ADDR_EN_B);
+
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_LOW_2_REG, low_val);
+ dsaf_write_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG,
+ high_val | (sta_addr_en << GMAC_ADDR_EN_B));
+}
+
+static int hns_gmac_config_loopback(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ switch (loop_mode) {
+ case MAC_INTERNALLOOP_MAC:
+ dsaf_set_dev_bit(drv, GMAC_LOOP_REG, GMAC_LP_REG_CF2MI_LP_EN_B,
+ !!enable);
+ break;
+ default:
+ dev_err(drv->dev, "loop_mode error\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hns_gmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ enum hns_gmac_duplex_mdoe duplex;
+ enum hns_port_mode speed;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 rx;
+ u32 tx;
+ u16 fc_tx_timer;
+ struct hns_gmac_port_mode_cfg port_mode = { GMAC_10M_MII, 0 };
+
+ hns_gmac_port_mode_get(mac_drv, &port_mode);
+ mac_info->pad_and_crc_en = port_mode.crc_add && port_mode.pad_enable;
+ mac_info->auto_neg = port_mode.an_enable;
+
+ hns_gmac_get_tx_auto_pause_frames(mac_drv, &fc_tx_timer);
+ mac_info->tx_pause_time = fc_tx_timer;
+
+ hns_gmac_get_en(mac_drv, &rx, &tx);
+ mac_info->port_en = rx && tx;
+
+ hns_gmac_get_duplex_type(mac_drv, &duplex);
+ mac_info->duplex = duplex;
+
+ hns_gmac_get_port_mode(mac_drv, &speed);
+ switch (speed) {
+ case GMAC_10M_SGMII:
+ mac_info->speed = MAC_SPEED_10;
+ break;
+ case GMAC_100M_SGMII:
+ mac_info->speed = MAC_SPEED_100;
+ break;
+ case GMAC_1000M_SGMII:
+ mac_info->speed = MAC_SPEED_1000;
+ break;
+ default:
+ mac_info->speed = 0;
+ break;
+ }
+
+ hns_gmac_get_pausefrm_cfg(mac_drv, &rx_pause, &tx_pause);
+ mac_info->rx_pause_en = rx_pause;
+ mac_info->tx_pause_en = tx_pause;
+}
+
+static void hns_gmac_autoneg_stat(void *mac_drv, u32 *enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *enable = dsaf_get_dev_bit(drv, GMAC_TRANSMIT_CONTROL_REG,
+ GMAC_TX_AN_EN_B);
+}
+
+static void hns_gmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_get_dev_bit(drv, GMAC_AN_NEG_STATE_REG,
+ GMAC_AN_NEG_STAT_RX_SYNC_OK_B);
+}
+
+static void hns_gmac_get_regs(void *mac_drv, void *data)
+{
+ u32 *regs = data;
+ int i;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, GMAC_DUPLEX_TYPE_REG);
+ regs[1] = dsaf_read_dev(drv, GMAC_FD_FC_TYPE_REG);
+ regs[2] = dsaf_read_dev(drv, GMAC_FC_TX_TIMER_REG);
+ regs[3] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_LOW_REG);
+ regs[4] = dsaf_read_dev(drv, GMAC_FD_FC_ADDR_HIGH_REG);
+ regs[5] = dsaf_read_dev(drv, GMAC_IPG_TX_TIMER_REG);
+ regs[6] = dsaf_read_dev(drv, GMAC_PAUSE_THR_REG);
+ regs[7] = dsaf_read_dev(drv, GMAC_MAX_FRM_SIZE_REG);
+ regs[8] = dsaf_read_dev(drv, GMAC_PORT_MODE_REG);
+ regs[9] = dsaf_read_dev(drv, GMAC_PORT_EN_REG);
+ regs[10] = dsaf_read_dev(drv, GMAC_PAUSE_EN_REG);
+ regs[11] = dsaf_read_dev(drv, GMAC_SHORT_RUNTS_THR_REG);
+ regs[12] = dsaf_read_dev(drv, GMAC_AN_NEG_STATE_REG);
+ regs[13] = dsaf_read_dev(drv, GMAC_TX_LOCAL_PAGE_REG);
+ regs[14] = dsaf_read_dev(drv, GMAC_TRANSMIT_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, GMAC_REC_FILT_CONTROL_REG);
+ regs[16] = dsaf_read_dev(drv, GMAC_PTP_CONFIG_REG);
+
+ /* rx static registers */
+ regs[17] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_OK_REG);
+ regs[18] = dsaf_read_dev(drv, GMAC_RX_OCTETS_BAD_REG);
+ regs[19] = dsaf_read_dev(drv, GMAC_RX_UC_PKTS_REG);
+ regs[20] = dsaf_read_dev(drv, GMAC_RX_MC_PKTS_REG);
+ regs[21] = dsaf_read_dev(drv, GMAC_RX_BC_PKTS_REG);
+ regs[22] = dsaf_read_dev(drv, GMAC_RX_PKTS_64OCTETS_REG);
+ regs[23] = dsaf_read_dev(drv, GMAC_RX_PKTS_65TO127OCTETS_REG);
+ regs[24] = dsaf_read_dev(drv, GMAC_RX_PKTS_128TO255OCTETS_REG);
+ regs[25] = dsaf_read_dev(drv, GMAC_RX_PKTS_255TO511OCTETS_REG);
+ regs[26] = dsaf_read_dev(drv, GMAC_RX_PKTS_512TO1023OCTETS_REG);
+ regs[27] = dsaf_read_dev(drv, GMAC_RX_PKTS_1024TO1518OCTETS_REG);
+ regs[28] = dsaf_read_dev(drv, GMAC_RX_PKTS_1519TOMAXOCTETS_REG);
+ regs[29] = dsaf_read_dev(drv, GMAC_RX_FCS_ERRORS_REG);
+ regs[30] = dsaf_read_dev(drv, GMAC_RX_TAGGED_REG);
+ regs[31] = dsaf_read_dev(drv, GMAC_RX_DATA_ERR_REG);
+ regs[32] = dsaf_read_dev(drv, GMAC_RX_ALIGN_ERRORS_REG);
+ regs[33] = dsaf_read_dev(drv, GMAC_RX_LONG_ERRORS_REG);
+ regs[34] = dsaf_read_dev(drv, GMAC_RX_JABBER_ERRORS_REG);
+ regs[35] = dsaf_read_dev(drv, GMAC_RX_PAUSE_MACCTRL_FRAM_REG);
+ regs[36] = dsaf_read_dev(drv, GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG);
+ regs[37] = dsaf_read_dev(drv, GMAC_RX_VERY_LONG_ERR_CNT_REG);
+ regs[38] = dsaf_read_dev(drv, GMAC_RX_RUNT_ERR_CNT_REG);
+ regs[39] = dsaf_read_dev(drv, GMAC_RX_SHORT_ERR_CNT_REG);
+ regs[40] = dsaf_read_dev(drv, GMAC_RX_FILT_PKT_CNT_REG);
+ regs[41] = dsaf_read_dev(drv, GMAC_RX_OCTETS_TOTAL_FILT_REG);
+
+ /* tx static registers */
+ regs[42] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_OK_REG);
+ regs[43] = dsaf_read_dev(drv, GMAC_OCTETS_TRANSMITTED_BAD_REG);
+ regs[44] = dsaf_read_dev(drv, GMAC_TX_UC_PKTS_REG);
+ regs[45] = dsaf_read_dev(drv, GMAC_TX_MC_PKTS_REG);
+ regs[46] = dsaf_read_dev(drv, GMAC_TX_BC_PKTS_REG);
+ regs[47] = dsaf_read_dev(drv, GMAC_TX_PKTS_64OCTETS_REG);
+ regs[48] = dsaf_read_dev(drv, GMAC_TX_PKTS_65TO127OCTETS_REG);
+ regs[49] = dsaf_read_dev(drv, GMAC_TX_PKTS_128TO255OCTETS_REG);
+ regs[50] = dsaf_read_dev(drv, GMAC_TX_PKTS_255TO511OCTETS_REG);
+ regs[51] = dsaf_read_dev(drv, GMAC_TX_PKTS_512TO1023OCTETS_REG);
+ regs[52] = dsaf_read_dev(drv, GMAC_TX_PKTS_1024TO1518OCTETS_REG);
+ regs[53] = dsaf_read_dev(drv, GMAC_TX_PKTS_1519TOMAXOCTETS_REG);
+ regs[54] = dsaf_read_dev(drv, GMAC_TX_EXCESSIVE_LENGTH_DROP_REG);
+ regs[55] = dsaf_read_dev(drv, GMAC_TX_UNDERRUN_REG);
+ regs[56] = dsaf_read_dev(drv, GMAC_TX_TAGGED_REG);
+ regs[57] = dsaf_read_dev(drv, GMAC_TX_CRC_ERROR_REG);
+ regs[58] = dsaf_read_dev(drv, GMAC_TX_PAUSE_FRAMES_REG);
+
+ regs[59] = dsaf_read_dev(drv, GAMC_RX_MAX_FRAME);
+ regs[60] = dsaf_read_dev(drv, GMAC_LINE_LOOP_BACK_REG);
+ regs[61] = dsaf_read_dev(drv, GMAC_CF_CRC_STRIP_REG);
+ regs[62] = dsaf_read_dev(drv, GMAC_MODE_CHANGE_EN_REG);
+ regs[63] = dsaf_read_dev(drv, GMAC_SIXTEEN_BIT_CNTR_REG);
+ regs[64] = dsaf_read_dev(drv, GMAC_LD_LINK_COUNTER_REG);
+ regs[65] = dsaf_read_dev(drv, GMAC_LOOP_REG);
+ regs[66] = dsaf_read_dev(drv, GMAC_RECV_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, GMAC_VLAN_CODE_REG);
+ regs[68] = dsaf_read_dev(drv, GMAC_RX_OVERRUN_CNT_REG);
+ regs[69] = dsaf_read_dev(drv, GMAC_RX_LENGTHFIELD_ERR_CNT_REG);
+ regs[70] = dsaf_read_dev(drv, GMAC_RX_FAIL_COMMA_CNT_REG);
+
+ regs[71] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_0_REG);
+ regs[72] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_0_REG);
+ regs[73] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_1_REG);
+ regs[74] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_1_REG);
+ regs[75] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_2_REG);
+ regs[76] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_2_REG);
+ regs[77] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_3_REG);
+ regs[78] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_3_REG);
+ regs[79] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_4_REG);
+ regs[80] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_4_REG);
+ regs[81] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_5_REG);
+ regs[82] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_5_REG);
+ regs[83] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_0_REG);
+ regs[84] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_0_REG);
+ regs[85] = dsaf_read_dev(drv, GMAC_STATION_ADDR_LOW_MSK_1_REG);
+ regs[86] = dsaf_read_dev(drv, GMAC_STATION_ADDR_HIGH_MSK_1_REG);
+ regs[87] = dsaf_read_dev(drv, GMAC_MAC_SKIP_LEN_REG);
+ regs[88] = dsaf_read_dev(drv, GMAC_TX_LOOP_PKT_PRI_REG);
+
+ /* mark end of mac regs */
+ for (i = 89; i < 96; i++)
+ regs[i] = 0xaaaaaaaa;
+}
+
+static void hns_gmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_gmac_stats_string[i].offset);
+ }
+}
+
+static void hns_gmac_get_strings(u32 stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_gmac_stats_string); i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s",
+ g_gmac_stats_string[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+}
+
+static int hns_gmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ARRAY_SIZE(g_gmac_stats_string);
+
+ return 0;
+}
+
+static int hns_gmac_get_regs_count(void)
+{
+ return ETH_GMAC_DUMP_NUM;
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_gmac_init;
+ mac_drv->mac_enable = hns_gmac_enable;
+ mac_drv->mac_disable = hns_gmac_disable;
+ mac_drv->mac_free = hns_gmac_free;
+ mac_drv->adjust_link = hns_gmac_adjust_link;
+ mac_drv->need_adjust_link = hns_gmac_need_adjust_link;
+ mac_drv->set_tx_auto_pause_frames = hns_gmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_gmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_gmac_pause_frm_cfg;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_gmac_set_mac_addr;
+ mac_drv->set_an_mode = hns_gmac_config_an_mode;
+ mac_drv->config_loopback = hns_gmac_config_loopback;
+ mac_drv->config_pad_and_crc = hns_gmac_config_pad_and_crc;
+ mac_drv->config_half_duplex = hns_gmac_set_duplex_type;
+ mac_drv->set_rx_ignore_pause_frames = hns_gmac_set_rx_auto_pause_frames;
+ mac_drv->get_info = hns_gmac_get_info;
+ mac_drv->autoneg_stat = hns_gmac_autoneg_stat;
+ mac_drv->get_pause_enable = hns_gmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_gmac_get_link_status;
+ mac_drv->get_regs = hns_gmac_get_regs;
+ mac_drv->get_regs_count = hns_gmac_get_regs_count;
+ mac_drv->get_ethtool_stats = hns_gmac_get_stats;
+ mac_drv->get_sset_count = hns_gmac_get_sset_count;
+ mac_drv->get_strings = hns_gmac_get_strings;
+ mac_drv->update_stats = hns_gmac_update_stats;
+ mac_drv->set_promiscuous = hns_gmac_set_promisc;
+ mac_drv->wait_fifo_clean = hns_gmac_wait_fifo_clean;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
new file mode 100644
index 000000000..ec266e7ff
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_GMAC_H
+#define _HNS_GMAC_H
+
+#include "hns_dsaf_mac.h"
+
+enum hns_port_mode {
+ GMAC_10M_MII = 0,
+ GMAC_100M_MII,
+ GMAC_1000M_GMII,
+ GMAC_10M_RGMII,
+ GMAC_100M_RGMII,
+ GMAC_1000M_RGMII,
+ GMAC_10M_SGMII,
+ GMAC_100M_SGMII,
+ GMAC_1000M_SGMII,
+ GMAC_10000M_SGMII /* 10GE */
+};
+
+enum hns_gmac_duplex_mdoe {
+ GMAC_HALF_DUPLEX_MODE = 0,
+ GMAC_FULL_DUPLEX_MODE
+};
+
+struct hns_gmac_port_mode_cfg {
+ enum hns_port_mode port_mode;
+ u32 max_frm_size;
+ u32 short_runts_thr;
+ u32 pad_enable;
+ u32 crc_add;
+	u32 an_enable;	/* auto-negotiation enable */
+ u32 runt_pkt_en;
+ u32 strip_pad_en;
+};
+
+#define ETH_GMAC_DUMP_NUM 96
+#endif /* _HNS_GMAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
new file mode 100644
index 000000000..1f44a6463
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.c
@@ -0,0 +1,1256 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_rcb.h"
+
+#define MAC_EN_FLAG_V 0xada0328
+
+static const u16 mac_phy_to_speed[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_GMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RMII] = MAC_SPEED_100,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_SPEED_1000,
+ [PHY_INTERFACE_MODE_XGMII] = MAC_SPEED_10000
+};
+
+static const enum mac_mode g_mac_mode_100[] = {
+ [PHY_INTERFACE_MODE_MII] = MAC_MODE_MII_100,
+ [PHY_INTERFACE_MODE_RMII] = MAC_MODE_RMII_100
+};
+
+static const enum mac_mode g_mac_mode_1000[] = {
+ [PHY_INTERFACE_MODE_GMII] = MAC_MODE_GMII_1000,
+ [PHY_INTERFACE_MODE_SGMII] = MAC_MODE_SGMII_1000,
+ [PHY_INTERFACE_MODE_TBI] = MAC_MODE_TBI_1000,
+ [PHY_INTERFACE_MODE_RGMII] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_ID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_RXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RGMII_TXID] = MAC_MODE_RGMII_1000,
+ [PHY_INTERFACE_MODE_RTBI] = MAC_MODE_RTBI_1000
+};
+
+static enum mac_mode hns_get_enet_interface(const struct hns_mac_cb *mac_cb)
+{
+ switch (mac_cb->max_speed) {
+ case MAC_SPEED_100:
+ return g_mac_mode_100[mac_cb->phy_if];
+ case MAC_SPEED_1000:
+ return g_mac_mode_1000[mac_cb->phy_if];
+ case MAC_SPEED_10000:
+ return MAC_MODE_XGMII_10000;
+ default:
+ return MAC_MODE_MII_100;
+ }
+}
+
+static u32 hns_mac_link_anti_shake(struct mac_driver *mac_ctrl_drv)
+{
+#define HNS_MAC_LINK_WAIT_TIME 5
+#define HNS_MAC_LINK_WAIT_CNT 40
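+	/* sample the link every HNS_MAC_LINK_WAIT_TIME ms, up to
+	 * HNS_MAC_LINK_WAIT_CNT times (200 ms in total); link up is only
+	 * reported if the link stays up for the whole window.
+	 */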
+
+ u32 link_status = 0;
+ int i;
+
+ if (!mac_ctrl_drv->get_link_status)
+ return link_status;
+
+ for (i = 0; i < HNS_MAC_LINK_WAIT_CNT; i++) {
+ msleep(HNS_MAC_LINK_WAIT_TIME);
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, &link_status);
+ if (!link_status)
+ break;
+ }
+
+ return link_status;
+}
+
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status)
+{
+ struct mac_driver *mac_ctrl_drv;
+ int ret, sfp_prsnt;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_link_status)
+ mac_ctrl_drv->get_link_status(mac_ctrl_drv, link_status);
+ else
+ *link_status = 0;
+
+ if (mac_cb->media_type == HNAE_MEDIA_TYPE_FIBER) {
+ ret = mac_cb->dsaf_dev->misc_op->get_sfp_prsnt(mac_cb,
+ &sfp_prsnt);
+ if (!ret)
+ *link_status = *link_status && sfp_prsnt;
+
+		/* a fiber port may report a spurious (fake) link up.
+		 * when the link status changes from down to up, apply
+		 * anti-shake filtering; the anti-shake time is based on tests.
+		 * only fiber ports need this.
+ */
+ if (*link_status && !mac_cb->link)
+ *link_status = hns_mac_link_anti_shake(mac_ctrl_drv);
+ }
+
+ mac_cb->link = *link_status;
+}
+
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+ struct mac_info info;
+
+ mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (!mac_ctrl_drv->get_info)
+ return -ENODEV;
+
+ mac_ctrl_drv->get_info(mac_ctrl_drv, &info);
+ if (auto_neg)
+ *auto_neg = info.auto_neg;
+ if (speed)
+ *speed = info.speed;
+ if (duplex)
+ *duplex = info.duplex;
+
+ return 0;
+}
+
+/**
+ *hns_mac_need_adjust_link - check whether the mac speed and duplex registers need to change
+ *@mac_cb: mac device
+ *@speed: phy device speed
+ *@duplex: phy device duplex
+ *
+ */
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ if (mac_ctrl_drv->need_adjust_link)
+ return mac_ctrl_drv->need_adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ else
+ return true;
+}
+
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv;
+
+ mac_ctrl_drv = (struct mac_driver *)(mac_cb->priv.mac);
+
+ mac_cb->speed = speed;
+ mac_cb->half_duplex = !duplex;
+
+ if (mac_ctrl_drv->adjust_link) {
+ ret = mac_ctrl_drv->adjust_link(mac_ctrl_drv,
+ (enum mac_speed)speed, duplex);
+ if (ret) {
+ dev_err(mac_cb->dev,
+ "adjust_link failed, %s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return;
+ }
+ }
+}
+
+/**
+ *hns_mac_get_inner_port_num - get mac table inner port number
+ *@mac_cb: mac device
+ *@vmid: vm id
+ *@port_num: output inner port number
+ *
+ */
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb, u8 vmid, u8 *port_num)
+{
+ int q_num_per_vf, vf_num_per_port;
+ int vm_queue_id;
+ u8 tmp_port;
+
+ if (mac_cb->dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE) {
+ if (mac_cb->mac_id != DSAF_MAX_PORT_NUM) {
+ dev_err(mac_cb->dev,
+ "input invalid, %s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else if (mac_cb->dsaf_dev->dsaf_mode < DSAF_MODE_MAX) {
+ if (mac_cb->mac_id >= DSAF_MAX_PORT_NUM) {
+ dev_err(mac_cb->dev,
+ "input invalid, %s mac%d vmid%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+ } else {
+ dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+
+ if (vmid >= mac_cb->dsaf_dev->rcb_common[0]->max_vfn) {
+ dev_err(mac_cb->dev, "input invalid, %s mac%d vmid%d !\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id, vmid);
+ return -EINVAL;
+ }
+
+ q_num_per_vf = mac_cb->dsaf_dev->rcb_common[0]->max_q_per_vf;
+ vf_num_per_port = mac_cb->dsaf_dev->rcb_common[0]->max_vfn;
+
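+	/* each mac owns vf_num_per_port * q_num_per_vf queues; within a
+	 * mac, every vf owns q_num_per_vf consecutive queues, so the vf's
+	 * first global queue index identifies its inner port.
+	 */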
+ vm_queue_id = vmid * q_num_per_vf +
+ vf_num_per_port * q_num_per_vf * mac_cb->mac_id;
+
+ switch (mac_cb->dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ tmp_port = 0;
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ tmp_port = vm_queue_id;
+ break;
+ default:
+ dev_err(mac_cb->dev, "dsaf mode invalid, %s mac%d!\n",
+ mac_cb->dsaf_dev->ae_dev.name, mac_cb->mac_id);
+ return -EINVAL;
+ }
+ tmp_port += DSAF_BASE_INNER_PORT_NUM;
+
+ *port_num = tmp_port;
+
+ return 0;
+}
+
+/**
+ *hns_mac_change_vf_addr - change vf mac address
+ *@mac_cb: mac device
+ *@vmid: vmid
+ *@addr:mac address
+ */
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb,
+ u32 vmid, char *addr)
+{
+ int ret;
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ struct mac_entry_idx *old_entry;
+
+ old_entry = &mac_cb->addr_entry_idx[vmid];
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = old_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, (u8)vmid,
+ &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ if ((old_entry->valid != 0) &&
+ (memcmp(old_entry->addr,
+ addr, sizeof(mac_entry.addr)) != 0)) {
+ ret = hns_dsaf_del_mac_entry(dsaf_dev,
+ old_entry->vlan_id,
+ mac_cb->mac_id,
+ old_entry->addr);
+ if (ret)
+ return ret;
+ }
+
+ ret = hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+ if (ret)
+ return ret;
+ }
+
+ if ((mac_ctrl_drv->set_mac_addr) && (vmid == 0))
+ mac_ctrl_drv->set_mac_addr(mac_cb->priv.mac, addr);
+
+ memcpy(old_entry->addr, addr, sizeof(old_entry->addr));
+ old_entry->valid = 1;
+ return 0;
+}
+
+int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return -ENOSPC;
+
+ memset(&mac_entry, 0, sizeof(mac_entry));
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vf_id, &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ return hns_dsaf_set_mac_uc_entry(dsaf_dev, &mac_entry);
+}
+
+int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return -ENOSPC;
+
+ memset(&mac_entry, 0, sizeof(mac_entry));
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vf_id, &mac_entry.port_num);
+ if (ret)
+ return ret;
+
+ return hns_dsaf_rm_mac_addr(dsaf_dev, &mac_entry);
+}
+
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, bool enable)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev) && addr) {
+ memcpy(mac_entry.addr, addr, sizeof(mac_entry.addr));
+ mac_entry.in_vlan_id = 0;/*vlan_id;*/
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (!enable)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "set mac mc port failed, %s mac%d ret = %#x!\n",
+ mac_cb->dsaf_dev->ae_dev.name,
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn)
+{
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ int ret = hns_mac_get_inner_port_num(mac_cb, vfn, &port_num);
+
+ if (ret)
+ return ret;
+
+ return hns_dsaf_clr_mac_mc_port(dsaf_dev, mac_cb->mac_id, port_num);
+}
+
+static void hns_mac_param_get(struct mac_params *param,
+ struct hns_mac_cb *mac_cb)
+{
+ param->vaddr = mac_cb->vaddr;
+ param->mac_mode = hns_get_enet_interface(mac_cb);
+ ether_addr_copy(param->addr, mac_cb->addr_entry_idx[0].addr);
+ param->mac_id = mac_cb->mac_id;
+ param->dev = mac_cb->dev;
+}
+
+/**
+ * hns_mac_port_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @port_num: queue number
+ * @vlan_id: vlan id
+ * @enable: enable
+ * return 0 - success , negative --fail
+ */
+static int hns_mac_port_config_bc_en(struct hns_mac_cb *mac_cb,
+ u32 port_num, u16 vlan_id, bool enable)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+	/* directly return ok in debug network mode */
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ eth_broadcast_addr(mac_entry.addr);
+ mac_entry.in_vlan_id = vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ mac_entry.port_num = port_num;
+
+ if (!enable)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mac_vm_config_bc_en - set broadcast rx&tx enable
+ * @mac_cb: mac device
+ * @vmid: vm id
+ * @enable: enable
+ * return 0 - success , negative --fail
+ */
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vmid, bool enable)
+{
+ int ret;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ u8 port_num;
+ struct mac_entry_idx *uc_mac_entry;
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ return 0;
+
+ uc_mac_entry = &mac_cb->addr_entry_idx[vmid];
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ eth_broadcast_addr(mac_entry.addr);
+ mac_entry.in_vlan_id = uc_mac_entry->vlan_id;
+ mac_entry.in_port_num = mac_cb->mac_id;
+ ret = hns_mac_get_inner_port_num(mac_cb, vmid, &port_num);
+ if (ret)
+ return ret;
+ mac_entry.port_num = port_num;
+
+ if (!enable)
+ ret = hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry);
+ else
+ ret = hns_dsaf_add_mac_mc_port(dsaf_dev, &mac_entry);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->wait_fifo_clean)
+ return drv->wait_fifo_clean(drv);
+
+ return 0;
+}
+
+void hns_mac_reset(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+
+ drv->mac_init(drv);
+
+ if (drv->config_max_frame_length)
+ drv->config_max_frame_length(drv, mac_cb->max_frm);
+
+ if (drv->set_tx_auto_pause_frames)
+ drv->set_tx_auto_pause_frames(drv, mac_cb->tx_pause_frm_time);
+
+ if (drv->set_an_mode)
+ drv->set_an_mode(drv, 1);
+
+ if (drv->mac_pausefrm_cfg) {
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG)
+ drv->mac_pausefrm_cfg(drv, !is_ver1, !is_ver1);
+		else /* mac rx pause must stay disabled; dsaf pfc is used instead */
+ drv->mac_pausefrm_cfg(drv, 0, 1);
+ }
+}
+
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size)
+{
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+ u32 new_frm = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+
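+	/* the whole frame (mtu plus ethernet header, fcs and one vlan tag)
+	 * must fit into the buffer descriptors available for one packet
+	 */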
+ if (new_frm > HNS_RCB_RING_MAX_BD_PER_PKT * buf_size)
+ return -EINVAL;
+
+ if (!drv->config_max_frame_length)
+ return -ECHILD;
+
+ /* adjust max frame to be at least the size of a standard frame */
+ if (new_frm < (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN))
+ new_frm = (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN);
+
+ drv->config_max_frame_length(drv, new_frm);
+
+ mac_cb->max_frm = new_frm;
+
+ return 0;
+}
+
+void hns_mac_start(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_drv = hns_mac_get_drv(mac_cb);
+
+ /* for virt */
+ if (mac_drv->mac_en_flg == MAC_EN_FLAG_V) {
+		/* count one more user when the virtual mac is already enabled */
+ mac_drv->virt_dev_num += 1;
+ return;
+ }
+
+ if (mac_drv->mac_enable) {
+ mac_drv->mac_enable(mac_cb->priv.mac, MAC_COMM_MODE_RX_AND_TX);
+ mac_drv->mac_en_flg = MAC_EN_FLAG_V;
+ }
+}
+
+void hns_mac_stop(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ /*modified for virtualization */
+ if (mac_ctrl_drv->virt_dev_num > 0) {
+ mac_ctrl_drv->virt_dev_num -= 1;
+ if (mac_ctrl_drv->virt_dev_num > 0)
+ return;
+ }
+
+ if (mac_ctrl_drv->mac_disable)
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac,
+ MAC_COMM_MODE_RX_AND_TX);
+
+ mac_ctrl_drv->mac_en_flg = 0;
+ mac_cb->link = 0;
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
+}
+
+/**
+ * hns_mac_get_autoneg - get autonegotiation status
+ * @mac_cb: mac control block
+ * @auto_neg: output pointer to autoneg result
+ */
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->autoneg_stat)
+ mac_ctrl_drv->autoneg_stat(mac_ctrl_drv, auto_neg);
+ else
+ *auto_neg = 0;
+}
+
+/**
+ * hns_mac_get_pauseparam - get rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable status
+ * @tx_en: tx enable status
+ */
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_ctrl_drv->get_pause_enable) {
+ mac_ctrl_drv->get_pause_enable(mac_ctrl_drv, rx_en, tx_en);
+ } else {
+ *rx_en = 0;
+ *tx_en = 0;
+ }
+}
+
+/**
+ * hns_mac_set_autoneg - enable or disable autonegotiation
+ * @mac_cb: mac control block
+ * @enable: enable or not
+ * return 0 - success , negative --fail
+ */
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII && enable) {
+ dev_err(mac_cb->dev, "enabling autoneg is not allowed!\n");
+ return -ENOTSUPP;
+ }
+
+ if (mac_ctrl_drv->set_an_mode)
+ mac_ctrl_drv->set_an_mode(mac_ctrl_drv, enable);
+
+ return 0;
+}
+
+/**
+ * hns_mac_set_pauseparam - set rx & tx pause parameters
+ * @mac_cb: mac control block
+ * @rx_en: rx enable or not
+ * @tx_en: tx enable or not
+ * return 0 - success , negative --fail
+ */
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+
+ if (mac_cb->mac_type == HNAE_PORT_DEBUG) {
+ if (is_ver1 && (tx_en || rx_en)) {
+ dev_err(mac_cb->dev, "macv1 can't enable tx/rx_pause!\n");
+ return -EINVAL;
+ }
+ }
+
+ if (mac_ctrl_drv->mac_pausefrm_cfg)
+ mac_ctrl_drv->mac_pausefrm_cfg(mac_ctrl_drv, rx_en, tx_en);
+
+ return 0;
+}
+
+/**
+ * hns_mac_init_ex - mac init
+ * @mac_cb: mac control block
+ * return 0 - success , negative --fail
+ */
+static int hns_mac_init_ex(struct hns_mac_cb *mac_cb)
+{
+ int ret;
+ struct mac_params param;
+ struct mac_driver *drv;
+
+ hns_dsaf_fix_mac_mode(mac_cb);
+
+ memset(&param, 0, sizeof(struct mac_params));
+ hns_mac_param_get(&param, mac_cb);
+
+ if (MAC_SPEED_FROM_MODE(param.mac_mode) < MAC_SPEED_10000)
+ drv = (struct mac_driver *)hns_gmac_config(mac_cb, &param);
+ else
+ drv = (struct mac_driver *)hns_xgmac_config(mac_cb, &param);
+
+ if (!drv)
+ return -ENOMEM;
+
+ mac_cb->priv.mac = (void *)drv;
+ hns_mac_reset(mac_cb);
+
+ hns_mac_adjust_link(mac_cb, mac_cb->speed, !mac_cb->half_duplex);
+
+ ret = hns_mac_port_config_bc_en(mac_cb, mac_cb->mac_id, 0, true);
+ if (ret)
+ goto free_mac_drv;
+
+ return 0;
+
+free_mac_drv:
+ drv->mac_free(mac_cb->priv.mac);
+ mac_cb->priv.mac = NULL;
+
+ return ret;
+}
+
+static int
+hns_mac_phy_parse_addr(struct device *dev, struct fwnode_handle *fwnode)
+{
+ u32 addr;
+ int ret;
+
+ ret = fwnode_property_read_u32(fwnode, "phy-addr", &addr);
+ if (ret) {
+ dev_err(dev, "has invalid PHY address ret:%d\n", ret);
+ return ret;
+ }
+
+ if (addr >= PHY_MAX_ADDR) {
+ dev_err(dev, "PHY address %i is too large\n", addr);
+ return -EINVAL;
+ }
+
+ return addr;
+}
+
+static int
+hns_mac_register_phydev(struct mii_bus *mdio, struct hns_mac_cb *mac_cb,
+ u32 addr)
+{
+ struct phy_device *phy;
+ const char *phy_type;
+ bool is_c45;
+ int rc;
+
+ rc = fwnode_property_read_string(mac_cb->fw_port,
+ "phy-mode", &phy_type);
+ if (rc < 0)
+ return rc;
+
+ if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_XGMII)))
+ is_c45 = true;
+ else if (!strcmp(phy_type, phy_modes(PHY_INTERFACE_MODE_SGMII)))
+ is_c45 = false;
+ else
+ return -ENODATA;
+
+ phy = get_phy_device(mdio, addr, is_c45);
+ if (!phy || IS_ERR(phy))
+ return -EIO;
+
+ phy->irq = mdio->irq[addr];
+
+ /* All data is now stored in the phy struct;
+ * register it
+ */
+ rc = phy_device_register(phy);
+ if (rc) {
+ phy_device_free(phy);
+ dev_err(&mdio->dev, "registered phy fail at address %i\n",
+ addr);
+ return -ENODEV;
+ }
+
+ mac_cb->phy_dev = phy;
+
+ dev_dbg(&mdio->dev, "registered phy at address %i\n", addr);
+
+ return 0;
+}
+
+static int hns_mac_register_phy(struct hns_mac_cb *mac_cb)
+{
+ struct fwnode_reference_args args;
+ struct platform_device *pdev;
+ struct mii_bus *mii_bus;
+ int rc;
+ int addr;
+
+	/* Register a phy_device for this port's ACPI fwnode */
+ if (!to_acpi_device_node(mac_cb->fw_port))
+ return -ENODEV;
+
+ rc = acpi_node_get_property_reference(
+ mac_cb->fw_port, "mdio-node", 0, &args);
+ if (rc)
+ return rc;
+ if (!is_acpi_device_node(args.fwnode))
+ return -EINVAL;
+
+ addr = hns_mac_phy_parse_addr(mac_cb->dev, mac_cb->fw_port);
+ if (addr < 0)
+ return addr;
+
+ /* dev address in adev */
+ pdev = hns_dsaf_find_platform_device(args.fwnode);
+ if (!pdev) {
+ dev_err(mac_cb->dev, "mac%d mdio pdev is NULL\n",
+ mac_cb->mac_id);
+ return -EINVAL;
+ }
+
+ mii_bus = platform_get_drvdata(pdev);
+ if (!mii_bus) {
+ dev_err(mac_cb->dev,
+ "mac%d mdio is NULL, dsaf will probe again later\n",
+ mac_cb->mac_id);
+ return -EPROBE_DEFER;
+ }
+
+ rc = hns_mac_register_phydev(mii_bus, mac_cb, addr);
+ if (!rc)
+ dev_dbg(mac_cb->dev, "mac%d register phy addr:%d\n",
+ mac_cb->mac_id, addr);
+
+ return rc;
+}
+
+static void hns_mac_remove_phydev(struct hns_mac_cb *mac_cb)
+{
+ if (!to_acpi_device_node(mac_cb->fw_port) || !mac_cb->phy_dev)
+ return;
+
+ phy_device_remove(mac_cb->phy_dev);
+ phy_device_free(mac_cb->phy_dev);
+
+ mac_cb->phy_dev = NULL;
+}
+
+#define MAC_MEDIA_TYPE_MAX_LEN 16
+
+static const struct {
+ enum hnae_media_type value;
+ const char *name;
+} media_type_defs[] = {
+ {HNAE_MEDIA_TYPE_UNKNOWN, "unknown" },
+ {HNAE_MEDIA_TYPE_FIBER, "fiber" },
+ {HNAE_MEDIA_TYPE_COPPER, "copper" },
+ {HNAE_MEDIA_TYPE_BACKPLANE, "backplane" },
+};
+
+/**
+ *hns_mac_get_info - get mac information from device node
+ *@mac_cb: mac device
+ * return: 0 --success, negative --fail
+ */
+static int hns_mac_get_info(struct hns_mac_cb *mac_cb)
+{
+ struct device_node *np;
+ struct regmap *syscon;
+ struct of_phandle_args cpld_args;
+ const char *media_type;
+ u32 i;
+ u32 ret;
+
+ mac_cb->link = false;
+ mac_cb->half_duplex = false;
+ mac_cb->media_type = HNAE_MEDIA_TYPE_UNKNOWN;
+ mac_cb->speed = mac_phy_to_speed[mac_cb->phy_if];
+ mac_cb->max_speed = mac_cb->speed;
+
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ mac_cb->if_support = MAC_GMAC_SUPPORTED;
+ mac_cb->if_support |= SUPPORTED_1000baseT_Full;
+ } else if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ mac_cb->if_support = SUPPORTED_10000baseR_FEC;
+ mac_cb->if_support |= SUPPORTED_10000baseKR_Full;
+ }
+
+ mac_cb->max_frm = MAC_DEFAULT_MTU;
+ mac_cb->tx_pause_frm_time = MAC_DEFAULT_PAUSE_TIME;
+ mac_cb->port_rst_off = mac_cb->mac_id;
+ mac_cb->port_mode_off = 0;
+
+ /* if the dsaf node doesn't contain a port subnode, get phy-handle
+ * from dsaf node
+ */
+ if (!mac_cb->fw_port) {
+ np = of_parse_phandle(mac_cb->dev->of_node, "phy-handle",
+ mac_cb->mac_id);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n",
+ mac_cb->mac_id, np);
+ }
+ of_node_put(np);
+
+ return 0;
+ }
+
+ if (is_of_node(mac_cb->fw_port)) {
+ /* parse property from port subnode in dsaf */
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "phy-handle", 0);
+ mac_cb->phy_dev = of_phy_find_device(np);
+ if (mac_cb->phy_dev) {
+ /* refcount is held by of_phy_find_device()
+ * if the phy_dev is found
+ */
+ put_device(&mac_cb->phy_dev->mdio.dev);
+ dev_dbg(mac_cb->dev, "mac%d phy_node: %pOFn\n",
+ mac_cb->mac_id, np);
+ }
+ of_node_put(np);
+
+ np = of_parse_phandle(to_of_node(mac_cb->fw_port),
+ "serdes-syscon", 0);
+ syscon = syscon_node_to_regmap(np);
+ of_node_put(np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_err(mac_cb->dev, "serdes-syscon is needed!\n");
+ return -EINVAL;
+ }
+ mac_cb->serdes_ctrl = syscon;
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-rst-offset",
+ &mac_cb->port_rst_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-rst-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = fwnode_property_read_u32(mac_cb->fw_port,
+ "port-mode-offset",
+ &mac_cb->port_mode_off);
+ if (ret) {
+ dev_dbg(mac_cb->dev,
+ "mac%d port-mode-offset not found, use default value.\n",
+ mac_cb->mac_id);
+ }
+
+ ret = of_parse_phandle_with_fixed_args(
+ to_of_node(mac_cb->fw_port), "cpld-syscon", 1, 0,
+ &cpld_args);
+ if (ret) {
+ dev_dbg(mac_cb->dev, "mac%d no cpld-syscon found.\n",
+ mac_cb->mac_id);
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ syscon = syscon_node_to_regmap(cpld_args.np);
+ if (IS_ERR_OR_NULL(syscon)) {
+ dev_dbg(mac_cb->dev, "no cpld-syscon found!\n");
+ mac_cb->cpld_ctrl = NULL;
+ } else {
+ mac_cb->cpld_ctrl = syscon;
+ mac_cb->cpld_ctrl_reg = cpld_args.args[0];
+ }
+ }
+ } else if (is_acpi_node(mac_cb->fw_port)) {
+ ret = hns_mac_register_phy(mac_cb);
+ /*
+		 * The mac works whether or not a phy is present. If the port
+		 * is not connected to a phy, the return value is ignored; it
+		 * is only handled when a phy exists but its mdio bus cannot
+		 * be found.
+ */
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ } else {
+ dev_err(mac_cb->dev, "mac%d cannot find phy node\n",
+ mac_cb->mac_id);
+ }
+
+ if (!fwnode_property_read_string(mac_cb->fw_port, "media-type",
+ &media_type)) {
+ for (i = 0; i < ARRAY_SIZE(media_type_defs); i++) {
+ if (!strncmp(media_type_defs[i].name, media_type,
+ MAC_MEDIA_TYPE_MAX_LEN)) {
+ mac_cb->media_type = media_type_defs[i].value;
+ break;
+ }
+ }
+ }
+
+ if (fwnode_property_read_u8_array(mac_cb->fw_port, "mc-mac-mask",
+ mac_cb->mc_mask, ETH_ALEN)) {
+ dev_warn(mac_cb->dev,
+ "no mc-mac-mask property, set to default value.\n");
+ eth_broadcast_addr(mac_cb->mc_mask);
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mac_get_mode - get mac mode
+ * @phy_if: phy interface
+ * return 0 - gmac, 1 - xgmac , negative --fail
+ */
+static int hns_mac_get_mode(phy_interface_t phy_if)
+{
+ switch (phy_if) {
+ case PHY_INTERFACE_MODE_SGMII:
+ return MAC_GMAC_IDX;
+ case PHY_INTERFACE_MODE_XGMII:
+ return MAC_XGMAC_IDX;
+ default:
+ return -EINVAL;
+ }
+}
+
+static u8 __iomem *
+hns_mac_get_vaddr(struct dsaf_device *dsaf_dev,
+ struct hns_mac_cb *mac_cb, u32 mac_mode_idx)
+{
+ u8 __iomem *base = dsaf_dev->io_base;
+ int mac_id = mac_cb->mac_id;
+
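+	/* with mac_mode_idx 0 for gmac and 1 for xgmac, a service port's
+	 * register block resolves to io_base + 0x40000 (gmac) or
+	 * io_base + 0x20000 (xgmac) plus a 0x4000 window per mac;
+	 * debug ports use ppe_base + 0x1000 instead.
+	 */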
+ if (mac_cb->mac_type == HNAE_PORT_SERVICE)
+ return base + 0x40000 + mac_id * 0x4000 -
+ mac_mode_idx * 0x20000;
+ else
+ return dsaf_dev->ppe_base + 0x1000;
+}
+
+/**
+ * hns_mac_get_cfg - get mac cfg from dtb or acpi table
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_cb: mac control block
+ * return 0 - success , negative --fail
+ */
+static int
+hns_mac_get_cfg(struct dsaf_device *dsaf_dev, struct hns_mac_cb *mac_cb)
+{
+ int ret;
+ u32 mac_mode_idx;
+
+ mac_cb->dsaf_dev = dsaf_dev;
+ mac_cb->dev = dsaf_dev->dev;
+
+ mac_cb->sys_ctl_vaddr = dsaf_dev->sc_base;
+ mac_cb->serdes_vaddr = dsaf_dev->sds_base;
+
+ mac_cb->sfp_prsnt = 0;
+ mac_cb->txpkt_for_led = 0;
+ mac_cb->rxpkt_for_led = 0;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ mac_cb->mac_type = HNAE_PORT_SERVICE;
+ else
+ mac_cb->mac_type = HNAE_PORT_DEBUG;
+
+ mac_cb->phy_if = dsaf_dev->misc_op->get_phy_if(mac_cb);
+
+ ret = hns_mac_get_mode(mac_cb->phy_if);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "hns_mac_get_mode failed, mac%d ret = %#x!\n",
+ mac_cb->mac_id, ret);
+ return ret;
+ }
+ mac_mode_idx = (u32)ret;
+
+ ret = hns_mac_get_info(mac_cb);
+ if (ret)
+ return ret;
+
+ mac_cb->dsaf_dev->misc_op->cpld_reset_led(mac_cb);
+ mac_cb->vaddr = hns_mac_get_vaddr(dsaf_dev, mac_cb, mac_mode_idx);
+
+ return 0;
+}
+
+static int hns_mac_get_max_port_num(struct dsaf_device *dsaf_dev)
+{
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 1;
+ else
+ return DSAF_MAX_PORT_NUM;
+}
+
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->mac_enable(mac_cb->priv.mac, mode);
+}
+
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->mac_disable(mac_cb->priv.mac, mode);
+}
+
+/**
+ * hns_mac_init - init mac
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success , negative --fail
+ */
+int hns_mac_init(struct dsaf_device *dsaf_dev)
+{
+ bool found = false;
+ int ret;
+ u32 port_id;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+ struct hns_mac_cb *mac_cb;
+ struct fwnode_handle *child;
+
+ device_for_each_child_node(dsaf_dev->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &port_id);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "get reg fail, ret=%d!\n", ret);
+ return ret;
+ }
+ if (port_id >= max_port_num) {
+ dev_err(dsaf_dev->dev,
+ "reg(%u) out of range!\n", port_id);
+ return -EINVAL;
+ }
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+ mac_cb->fw_port = child;
+ mac_cb->mac_id = (u8)port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ found = true;
+ }
+
+	/* if no port subnode was found under the dsaf node, init all
+	 * ports; this keeps compatibility with the old dts
+ */
+ if (!found) {
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = devm_kzalloc(dsaf_dev->dev, sizeof(*mac_cb),
+ GFP_KERNEL);
+ if (!mac_cb)
+ return -ENOMEM;
+
+ mac_cb->mac_id = port_id;
+ dsaf_dev->mac_cb[port_id] = mac_cb;
+ }
+ }
+
+ /* init mac_cb for all port */
+ for (port_id = 0; port_id < max_port_num; port_id++) {
+ mac_cb = dsaf_dev->mac_cb[port_id];
+ if (!mac_cb)
+ continue;
+
+ ret = hns_mac_get_cfg(dsaf_dev, mac_cb);
+ if (ret)
+ return ret;
+
+ ret = hns_mac_init_ex(mac_cb);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+void hns_mac_uninit(struct dsaf_device *dsaf_dev)
+{
+ int i;
+ int max_port_num = hns_mac_get_max_port_num(dsaf_dev);
+
+ for (i = 0; i < max_port_num; i++) {
+ if (!dsaf_dev->mac_cb[i])
+ continue;
+
+ dsaf_dev->misc_op->cpld_reset_led(dsaf_dev->mac_cb[i]);
+ hns_mac_remove_phydev(dsaf_dev->mac_cb[i]);
+ dsaf_dev->mac_cb[i] = NULL;
+ }
+}
+
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en)
+{
+ int ret;
+ struct mac_driver *drv = hns_mac_get_drv(mac_cb);
+
+ if (drv->config_loopback)
+ ret = drv->config_loopback(drv, loop, en);
+ else
+ ret = -ENOTSUPP;
+
+ return ret;
+}
+
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->update_stats(mac_ctrl_drv);
+}
+
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_ethtool_stats(mac_ctrl_drv, data);
+}
+
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb,
+ int stringset, u8 *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_strings(stringset, data);
+}
+
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_sset_count(stringset);
+}
+
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
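+	/* promiscuous mode is programmed in the dsaf tcam and, when the
+	 * mac driver supports it, in the mac itself
+	 */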
+
+ hns_dsaf_set_promisc_tcam(mac_cb->dsaf_dev, mac_cb->mac_id, !!en);
+
+ if (mac_ctrl_drv->set_promiscuous)
+ mac_ctrl_drv->set_promiscuous(mac_ctrl_drv, en);
+}
+
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ return mac_ctrl_drv->get_regs_count();
+}
+
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data)
+{
+ struct mac_driver *mac_ctrl_drv = hns_mac_get_drv(mac_cb);
+
+ mac_ctrl_drv->get_regs(mac_ctrl_drv, data);
+}
+
+void hns_set_led_opt(struct hns_mac_cb *mac_cb)
+{
+ int nic_data = 0;
+ int txpkts, rxpkts;
+
+ txpkts = mac_cb->txpkt_for_led - mac_cb->hw_stats.tx_good_pkts;
+ rxpkts = mac_cb->rxpkt_for_led - mac_cb->hw_stats.rx_good_pkts;
+ if (txpkts || rxpkts)
+ nic_data = 1;
+ else
+ nic_data = 0;
+ mac_cb->txpkt_for_led = mac_cb->hw_stats.tx_good_pkts;
+ mac_cb->rxpkt_for_led = mac_cb->hw_stats.rx_good_pkts;
+ mac_cb->dsaf_dev->misc_op->cpld_set_led(mac_cb, (int)mac_cb->link,
+ mac_cb->speed, nic_data);
+}
+
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ if (!mac_cb)
+ return 0;
+
+ return mac_cb->dsaf_dev->misc_op->cpld_set_led_id(mac_cb, status);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
new file mode 100644
index 000000000..3278bf471
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_mac.h
@@ -0,0 +1,471 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_MAC_H
+#define _HNS_DSAF_MAC_H
+
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include "hns_dsaf_main.h"
+
+struct dsaf_device;
+
+#define MAC_GMAC_SUPPORTED \
+ (SUPPORTED_10baseT_Half \
+ | SUPPORTED_10baseT_Full \
+ | SUPPORTED_100baseT_Half \
+ | SUPPORTED_100baseT_Full \
+ | SUPPORTED_Autoneg)
+
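+/* default max frame: 1500-byte payload plus ethernet header (14),
+ * fcs (4) and one vlan tag (4), i.e. 1522 bytes
+ */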
+#define MAC_DEFAULT_MTU (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN)
+#define MAC_MAX_MTU 9600
+#define MAC_MAX_MTU_V2 9728
+#define MAC_MIN_MTU 68
+#define MAC_MAX_MTU_DBG MAC_DEFAULT_MTU
+
+#define MAC_DEFAULT_PAUSE_TIME 0xffff
+
+#define MAC_GMAC_IDX 0
+#define MAC_XGMAC_IDX 1
+
+#define ETH_STATIC_REG 1
+#define ETH_DUMP_REG 5
+/* check mac addr broadcast */
+#define MAC_IS_BROADCAST(p) ((*(p) == 0xff) && (*((p) + 1) == 0xff) && \
+ (*((p) + 2) == 0xff) && (*((p) + 3) == 0xff) && \
+ (*((p) + 4) == 0xff) && (*((p) + 5) == 0xff))
+
+/* check mac addr is 01-00-5e-xx-xx-xx*/
+#define MAC_IS_L3_MULTICAST(p) ((*((p) + 0) == 0x01) && \
+ (*((p) + 1) == 0x00) && \
+ (*((p) + 2) == 0x5e))
+
+/*check the mac addr is 0 in all bit*/
+#define MAC_IS_ALL_ZEROS(p) ((*(p) == 0) && (*((p) + 1) == 0) && \
+ (*((p) + 2) == 0) && (*((p) + 3) == 0) && \
+ (*((p) + 4) == 0) && (*((p) + 5) == 0))
+
+/*check mac addr multicast*/
+#define MAC_IS_MULTICAST(p) ((*((u8 *)((p) + 0)) & 0x01) ? (1) : (0))
+
+struct mac_priv {
+ void *mac;
+};
+
+/* net speed */
+enum mac_speed {
+ MAC_SPEED_10 = 10, /**< 10 Mbps */
+ MAC_SPEED_100 = 100, /**< 100 Mbps */
+ MAC_SPEED_1000 = 1000, /**< 1000 Mbps = 1 Gbps */
+ MAC_SPEED_10000 = 10000 /**< 10000 Mbps = 10 Gbps */
+};
+
+/*mac interface keyword */
+enum mac_intf {
+	MAC_IF_NONE = 0x00000000, /**< no valid interface */
+ MAC_IF_MII = 0x00010000, /**< MII interface */
+ MAC_IF_RMII = 0x00020000, /**< RMII interface */
+ MAC_IF_SMII = 0x00030000, /**< SMII interface */
+ MAC_IF_GMII = 0x00040000, /**< GMII interface */
+ MAC_IF_RGMII = 0x00050000, /**< RGMII interface */
+ MAC_IF_TBI = 0x00060000, /**< TBI interface */
+ MAC_IF_RTBI = 0x00070000, /**< RTBI interface */
+ MAC_IF_SGMII = 0x00080000, /**< SGMII interface */
+ MAC_IF_XGMII = 0x00090000, /**< XGMII interface */
+ MAC_IF_QSGMII = 0x000a0000 /**< QSGMII interface */
+};
+
+/*mac mode */
+enum mac_mode {
+ /**< Invalid Ethernet mode */
+ MAC_MODE_INVALID = 0,
+ /**< 10 Mbps MII */
+ MAC_MODE_MII_10 = (MAC_IF_MII | MAC_SPEED_10),
+ /**< 100 Mbps MII */
+ MAC_MODE_MII_100 = (MAC_IF_MII | MAC_SPEED_100),
+ /**< 10 Mbps RMII */
+ MAC_MODE_RMII_10 = (MAC_IF_RMII | MAC_SPEED_10),
+ /**< 100 Mbps RMII */
+ MAC_MODE_RMII_100 = (MAC_IF_RMII | MAC_SPEED_100),
+ /**< 10 Mbps SMII */
+ MAC_MODE_SMII_10 = (MAC_IF_SMII | MAC_SPEED_10),
+ /**< 100 Mbps SMII */
+ MAC_MODE_SMII_100 = (MAC_IF_SMII | MAC_SPEED_100),
+ /**< 1000 Mbps GMII */
+ MAC_MODE_GMII_1000 = (MAC_IF_GMII | MAC_SPEED_1000),
+ /**< 10 Mbps RGMII */
+ MAC_MODE_RGMII_10 = (MAC_IF_RGMII | MAC_SPEED_10),
+ /**< 100 Mbps RGMII */
+ MAC_MODE_RGMII_100 = (MAC_IF_RGMII | MAC_SPEED_100),
+ /**< 1000 Mbps RGMII */
+ MAC_MODE_RGMII_1000 = (MAC_IF_RGMII | MAC_SPEED_1000),
+ /**< 1000 Mbps TBI */
+ MAC_MODE_TBI_1000 = (MAC_IF_TBI | MAC_SPEED_1000),
+ /**< 1000 Mbps RTBI */
+ MAC_MODE_RTBI_1000 = (MAC_IF_RTBI | MAC_SPEED_1000),
+ /**< 10 Mbps SGMII */
+ MAC_MODE_SGMII_10 = (MAC_IF_SGMII | MAC_SPEED_10),
+ /**< 100 Mbps SGMII */
+ MAC_MODE_SGMII_100 = (MAC_IF_SGMII | MAC_SPEED_100),
+ /**< 1000 Mbps SGMII */
+ MAC_MODE_SGMII_1000 = (MAC_IF_SGMII | MAC_SPEED_1000),
+ /**< 10000 Mbps XGMII */
+ MAC_MODE_XGMII_10000 = (MAC_IF_XGMII | MAC_SPEED_10000),
+ /**< 1000 Mbps QSGMII */
+ MAC_MODE_QSGMII_1000 = (MAC_IF_QSGMII | MAC_SPEED_1000)
+};
+
+/* mac communication mode */
+enum mac_commom_mode {
+ MAC_COMM_MODE_NONE = 0, /**< No transmit/receive communication */
+ MAC_COMM_MODE_RX = 1, /**< Only receive communication */
+ MAC_COMM_MODE_TX = 2, /**< Only transmit communication */
+ MAC_COMM_MODE_RX_AND_TX = 3 /**< Both tx and rx communication */
+};
+
+/*mac statistics */
+struct mac_statistics {
+ u64 stat_pkts64; /* r-10G tr-DT 64 byte frame counter */
+ u64 stat_pkts65to127; /* r-10G 65 to 127 byte frame counter */
+ u64 stat_pkts128to255; /* r-10G 128 to 255 byte frame counter */
+ u64 stat_pkts256to511; /*r-10G 256 to 511 byte frame counter */
+ u64 stat_pkts512to1023;/* r-10G 512 to 1023 byte frame counter */
+ u64 stat_pkts1024to1518; /* r-10G 1024 to 1518 byte frame counter */
+ u64 stat_pkts1519to1522; /* r-10G 1519 to 1522 byte good frame count*/
+ /* Total number of packets that were less than 64 octets */
+ /* long with a wrong CRC.*/
+ u64 stat_fragments;
+ /* Total number of packets longer than valid maximum length octets */
+ u64 stat_jabbers;
+ /* number of dropped packets due to internal errors of */
+ /* the MAC Client. */
+ u64 stat_drop_events;
+ /* Incremented when frames of correct length but with */
+ /* CRC error are received.*/
+ u64 stat_crc_align_errors;
+ /* Total number of packets that were less than 64 octets */
+ /* long with a good CRC.*/
+ u64 stat_undersize_pkts;
+ u64 stat_oversize_pkts; /**< T,B.D*/
+
+ u64 stat_rx_pause; /**< Pause MAC Control received */
+ u64 stat_tx_pause; /**< Pause MAC Control sent */
+
+ u64 in_octets; /**< Total number of byte received. */
+ u64 in_pkts; /* Total number of packets received.*/
+ u64 in_mcast_pkts; /* Total number of multicast frame received */
+ u64 in_bcast_pkts; /* Total number of broadcast frame received */
+ /* Frames received, but discarded due to */
+ /* problems within the MAC RX. */
+ u64 in_discards;
+ u64 in_errors; /* Number of frames received with error: */
+ /* - FIFO Overflow Error */
+ /* - CRC Error */
+ /* - Frame Too Long Error */
+ /* - Alignment Error */
+ u64 out_octets; /*Total number of byte sent. */
+ u64 out_pkts; /**< Total number of packets sent .*/
+ u64 out_mcast_pkts; /* Total number of multicast frame sent */
+	u64 out_bcast_pkts; /* Total number of broadcast frames sent */
+ /* Frames received, but discarded due to problems within */
+ /* the MAC TX N/A!.*/
+ u64 out_discards;
+ u64 out_errors; /*Number of frames transmitted with error: */
+ /* - FIFO Overflow Error */
+ /* - FIFO Underflow Error */
+ /* - Other */
+};
+
+/* mac parameter struct; the mac gets these params from the nic or dsaf at init time */
+struct mac_params {
+ char addr[ETH_ALEN];
+ u8 __iomem *vaddr; /*virtual address*/
+ struct device *dev;
+ u8 mac_id;
+ /**< Ethernet operation mode (MAC-PHY interface and speed) */
+ enum mac_mode mac_mode;
+};
+
+struct mac_info {
+	u16 speed;	/* The forced speed (lower bits) in Mbps.
+			 * Use ethtool_cmd_speed()/_set() to access it.
+			 */
+ u8 duplex; /* Duplex, half or full */
+ u8 auto_neg; /* Enable or disable autonegotiation */
+ enum hnae_loop loop_mode;
+ u8 tx_pause_en;
+ u8 tx_pause_time;
+ u8 rx_pause_en;
+ u8 pad_and_crc_en;
+ u8 promiscuous_en;
+ u8 port_en; /*port enable*/
+};
+
+struct mac_entry_idx {
+ u8 addr[ETH_ALEN];
+ u16 vlan_id:12;
+ u16 valid:1;
+ u16 qos:3;
+};
+
+struct mac_hw_stats {
+ u64 rx_good_pkts; /* only for xgmac */
+ u64 rx_good_bytes;
+ u64 rx_total_pkts; /* only for xgmac */
+ u64 rx_total_bytes; /* only for xgmac */
+ u64 rx_bad_bytes; /* only for gmac */
+ u64 rx_uc_pkts;
+ u64 rx_mc_pkts;
+ u64 rx_bc_pkts;
+ u64 rx_fragment_err; /* only for xgmac */
+ u64 rx_undersize; /* only for xgmac */
+ u64 rx_under_min;
+ u64 rx_minto64; /* only for gmac */
+ u64 rx_64bytes;
+ u64 rx_65to127;
+ u64 rx_128to255;
+ u64 rx_256to511;
+ u64 rx_512to1023;
+ u64 rx_1024to1518;
+ u64 rx_1519tomax;
+ u64 rx_1519tomax_good; /* only for xgmac */
+ u64 rx_oversize;
+ u64 rx_jabber_err;
+ u64 rx_fcs_err;
+ u64 rx_vlan_pkts; /* only for gmac */
+ u64 rx_data_err; /* only for gmac */
+ u64 rx_align_err; /* only for gmac */
+ u64 rx_long_err; /* only for gmac */
+ u64 rx_pfc_tc0;
+ u64 rx_pfc_tc1; /* only for xgmac */
+ u64 rx_pfc_tc2; /* only for xgmac */
+ u64 rx_pfc_tc3; /* only for xgmac */
+ u64 rx_pfc_tc4; /* only for xgmac */
+ u64 rx_pfc_tc5; /* only for xgmac */
+ u64 rx_pfc_tc6; /* only for xgmac */
+ u64 rx_pfc_tc7; /* only for xgmac */
+ u64 rx_unknown_ctrl;
+ u64 rx_filter_pkts; /* only for gmac */
+ u64 rx_filter_bytes; /* only for gmac */
+ u64 rx_fifo_overrun_err;/* only for gmac */
+ u64 rx_len_err; /* only for gmac */
+ u64 rx_comma_err; /* only for gmac */
+ u64 rx_symbol_err; /* only for xgmac */
+ u64 tx_good_to_sw; /* only for xgmac */
+ u64 tx_bad_to_sw; /* only for xgmac */
+ u64 rx_1731_pkts; /* only for xgmac */
+
+ u64 tx_good_bytes;
+ u64 tx_good_pkts; /* only for xgmac */
+ u64 tx_total_bytes; /* only for xgmac */
+ u64 tx_total_pkts; /* only for xgmac */
+ u64 tx_bad_bytes; /* only for gmac */
+ u64 tx_bad_pkts; /* only for xgmac */
+ u64 tx_uc_pkts;
+ u64 tx_mc_pkts;
+ u64 tx_bc_pkts;
+ u64 tx_undersize; /* only for xgmac */
+ u64 tx_fragment_err; /* only for xgmac */
+ u64 tx_under_min_pkts; /* only for gmac */
+ u64 tx_64bytes;
+ u64 tx_65to127;
+ u64 tx_128to255;
+ u64 tx_256to511;
+ u64 tx_512to1023;
+ u64 tx_1024to1518;
+ u64 tx_1519tomax;
+ u64 tx_1519tomax_good; /* only for xgmac */
+ u64 tx_oversize; /* only for xgmac */
+ u64 tx_jabber_err;
+ u64 tx_underrun_err; /* only for gmac */
+ u64 tx_vlan; /* only for gmac */
+ u64 tx_crc_err; /* only for gmac */
+ u64 tx_pfc_tc0;
+ u64 tx_pfc_tc1; /* only for xgmac */
+ u64 tx_pfc_tc2; /* only for xgmac */
+ u64 tx_pfc_tc3; /* only for xgmac */
+ u64 tx_pfc_tc4; /* only for xgmac */
+ u64 tx_pfc_tc5; /* only for xgmac */
+ u64 tx_pfc_tc6; /* only for xgmac */
+ u64 tx_pfc_tc7; /* only for xgmac */
+ u64 tx_ctrl; /* only for xgmac */
+ u64 tx_1731_pkts; /* only for xgmac */
+ u64 tx_1588_pkts; /* only for xgmac */
+ u64 rx_good_from_sw; /* only for xgmac */
+ u64 rx_bad_from_sw; /* only for xgmac */
+};
+
+struct hns_mac_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ struct mac_priv priv;
+ struct fwnode_handle *fw_port;
+ u8 __iomem *vaddr;
+ u8 __iomem *sys_ctl_vaddr;
+ u8 __iomem *serdes_vaddr;
+ struct regmap *serdes_ctrl;
+ struct regmap *cpld_ctrl;
+ char mc_mask[ETH_ALEN];
+ u32 cpld_ctrl_reg;
+ u32 port_rst_off;
+ u32 port_mode_off;
+ struct mac_entry_idx addr_entry_idx[DSAF_MAX_VM_NUM];
+ u8 sfp_prsnt;
+ u8 cpld_led_value;
+ u8 mac_id;
+
+ u8 link;
+ u8 half_duplex;
+ u16 speed;
+ u16 max_speed;
+ u16 max_frm;
+ u16 tx_pause_frm_time;
+ u32 if_support;
+ u64 txpkt_for_led;
+ u64 rxpkt_for_led;
+ enum hnae_port_type mac_type;
+ enum hnae_media_type media_type;
+ phy_interface_t phy_if;
+ enum hnae_loop loop_mode;
+
+ struct phy_device *phy_dev;
+
+ struct mac_hw_stats hw_stats;
+};
+
+struct mac_driver {
+ /*init Mac when init nic or dsaf*/
+ void (*mac_init)(void *mac_drv);
+ /*remove mac when remove nic or dsaf*/
+ void (*mac_free)(void *mac_drv);
+ /*enable mac when enable nic or dsaf*/
+ void (*mac_enable)(void *mac_drv, enum mac_commom_mode mode);
+ /*disable mac when disable nic or dsaf*/
+ void (*mac_disable)(void *mac_drv, enum mac_commom_mode mode);
+ /* config mac address*/
+ void (*set_mac_addr)(void *mac_drv, char *mac_addr);
+ /* adjust the mac mode of the port, including speed and duplex */
+ int (*adjust_link)(void *mac_drv, enum mac_speed speed,
+ u32 full_duplex);
+ /* check whether the link needs to be adjusted */
+ bool (*need_adjust_link)(void *mac_drv, enum mac_speed speed,
+ int duplex);
+ /* config the autonegotiation mode of the port */
+ void (*set_an_mode)(void *mac_drv, u8 enable);
+ /* config loopback mode */
+ int (*config_loopback)(void *mac_drv, enum hnae_loop loop_mode,
+ u8 enable);
+ /* config mtu*/
+ void (*config_max_frame_length)(void *mac_drv, u16 newval);
+ /*config PAD and CRC enable */
+ void (*config_pad_and_crc)(void *mac_drv, u8 newval);
+ /* config duplex mode*/
+ void (*config_half_duplex)(void *mac_drv, u8 newval);
+ /* config tx pause time; if pause_time is zero, disable tx pause */
+ void (*set_tx_auto_pause_frames)(void *mac_drv, u16 pause_time);
+ /*config rx pause enable*/
+ void (*set_rx_ignore_pause_frames)(void *mac_drv, u32 enable);
+ /* config rx mode for promiscuous*/
+ void (*set_promiscuous)(void *mac_drv, u8 enable);
+ void (*mac_pausefrm_cfg)(void *mac_drv, u32 rx_en, u32 tx_en);
+
+ void (*autoneg_stat)(void *mac_drv, u32 *enable);
+ int (*set_pause_enable)(void *mac_drv, u32 rx_en, u32 tx_en);
+ void (*get_pause_enable)(void *mac_drv, u32 *rx_en, u32 *tx_en);
+ void (*get_link_status)(void *mac_drv, u32 *link_stat);
+ /* get the important regs */
+ void (*get_regs)(void *mac_drv, void *data);
+ int (*get_regs_count)(void);
+ /* get the string names for ethtool statistics */
+ void (*get_strings)(u32 stringset, u8 *data);
+ /* get the number of strings*/
+ int (*get_sset_count)(int stringset);
+
+ /* get the statistics for ethtool */
+ void (*get_ethtool_stats)(void *mac_drv, u64 *data);
+
+ /* get mac information */
+ void (*get_info)(void *mac_drv, struct mac_info *mac_info);
+
+ void (*update_stats)(void *mac_drv);
+ int (*wait_fifo_clean)(void *mac_drv);
+
+ enum mac_mode mac_mode;
+ u8 mac_id;
+ struct hns_mac_cb *mac_cb;
+ u8 __iomem *io_base;
+ unsigned int mac_en_flg; /* guards against enabling the mac twice */
+ unsigned int virt_dev_num;
+ struct device *dev;
+};
+
+struct mac_stats_string {
+ const char desc[ETH_GSTRING_LEN];
+ unsigned long offset;
+};
+
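+/* mac_mode packs the interface type in the upper 16 bits and the link
+ * speed in the lower 16 bits; the helpers below build and split that
+ * encoding. MAC_STATS_FIELD_OFF gives a mac_hw_stats member's offset,
+ * e.g. for the offset member of struct mac_stats_string above.
+ */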
+#define MAC_MAKE_MODE(interface, speed) (enum mac_mode)((interface) | (speed))
+#define MAC_INTERFACE_FROM_MODE(mode) (enum mac_intf)((mode) & 0xFFFF0000)
+#define MAC_SPEED_FROM_MODE(mode) (enum mac_speed)((mode) & 0x0000FFFF)
+#define MAC_STATS_FIELD_OFF(field) (offsetof(struct mac_hw_stats, field))
+
+static inline struct mac_driver *hns_mac_get_drv(
+ const struct hns_mac_cb *mac_cb)
+{
+ return (struct mac_driver *)(mac_cb->priv.mac);
+}
+
+void *hns_gmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb,
+ struct mac_params *mac_param);
+
+int hns_mac_init(struct dsaf_device *dsaf_dev);
+void mac_adjust_link(struct net_device *net_dev);
+bool hns_mac_need_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_get_link_status(struct hns_mac_cb *mac_cb, u32 *link_status);
+int hns_mac_change_vf_addr(struct hns_mac_cb *mac_cb, u32 vmid, char *addr);
+int hns_mac_set_multi(struct hns_mac_cb *mac_cb,
+ u32 port_num, char *addr, bool enable);
+int hns_mac_vm_config_bc_en(struct hns_mac_cb *mac_cb, u32 vm, bool enable);
+void hns_mac_start(struct hns_mac_cb *mac_cb);
+void hns_mac_stop(struct hns_mac_cb *mac_cb);
+void hns_mac_uninit(struct dsaf_device *dsaf_dev);
+void hns_mac_adjust_link(struct hns_mac_cb *mac_cb, int speed, int duplex);
+void hns_mac_reset(struct hns_mac_cb *mac_cb);
+void hns_mac_get_autoneg(struct hns_mac_cb *mac_cb, u32 *auto_neg);
+void hns_mac_get_pauseparam(struct hns_mac_cb *mac_cb, u32 *rx_en, u32 *tx_en);
+int hns_mac_set_autoneg(struct hns_mac_cb *mac_cb, u8 enable);
+int hns_mac_set_pauseparam(struct hns_mac_cb *mac_cb, u32 rx_en, u32 tx_en);
+int hns_mac_set_mtu(struct hns_mac_cb *mac_cb, u32 new_mtu, u32 buf_size);
+int hns_mac_get_port_info(struct hns_mac_cb *mac_cb,
+ u8 *auto_neg, u16 *speed, u8 *duplex);
+int hns_mac_config_mac_loopback(struct hns_mac_cb *mac_cb,
+ enum hnae_loop loop, int en);
+void hns_mac_update_stats(struct hns_mac_cb *mac_cb);
+void hns_mac_get_stats(struct hns_mac_cb *mac_cb, u64 *data);
+void hns_mac_get_strings(struct hns_mac_cb *mac_cb, int stringset, u8 *data);
+int hns_mac_get_sset_count(struct hns_mac_cb *mac_cb, int stringset);
+void hns_mac_get_regs(struct hns_mac_cb *mac_cb, void *data);
+int hns_mac_get_regs_count(struct hns_mac_cb *mac_cb);
+void hns_set_led_opt(struct hns_mac_cb *mac_cb);
+int hns_cpld_led_set_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+void hns_mac_set_promisc(struct hns_mac_cb *mac_cb, u8 en);
+int hns_mac_get_inner_port_num(struct hns_mac_cb *mac_cb,
+ u8 vmid, u8 *port_num);
+int hns_mac_add_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr);
+int hns_mac_rm_uc_addr(struct hns_mac_cb *mac_cb, u8 vf_id,
+ const unsigned char *addr);
+int hns_mac_clr_multicast(struct hns_mac_cb *mac_cb, int vfn);
+void hns_mac_enable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+void hns_mac_disable(struct hns_mac_cb *mac_cb, enum mac_commom_mode mode);
+int hns_mac_wait_fifo_clean(struct hns_mac_cb *mac_cb);
+
+#endif /* _HNS_DSAF_MAC_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
new file mode 100644
index 000000000..87d3db466
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.c
@@ -0,0 +1,3158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/vmalloc.h>
+
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+#include "hns_dsaf_misc.h"
+
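+/* the "mode" device property is matched against this table in
+ * hns_dsaf_get_cfg() to select the dsaf working mode
+ */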
+static const char *g_dsaf_mode_match[DSAF_MODE_MAX] = {
+ [DSAF_MODE_DISABLE_2PORT_64VM] = "2port-64vf",
+ [DSAF_MODE_DISABLE_6PORT_0VM] = "6port-16rss",
+ [DSAF_MODE_DISABLE_6PORT_16VM] = "6port-16vf",
+ [DSAF_MODE_DISABLE_SP] = "single-port",
+};
+
+static const struct acpi_device_id hns_dsaf_acpi_match[] = {
+ { "HISI00B1", 0 },
+ { "HISI00B2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_dsaf_acpi_match);
+
+static int hns_dsaf_get_cfg(struct dsaf_device *dsaf_dev)
+{
+ int ret, i;
+ u32 desc_num;
+ u32 buf_size;
+ u32 reset_offset = 0;
+ u32 res_idx = 0;
+ const char *mode_str;
+ struct regmap *syscon;
+ struct resource *res;
+ struct device_node *np = dsaf_dev->dev->of_node, *np_temp;
+ struct platform_device *pdev = to_platform_device(dsaf_dev->dev);
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ if (of_device_is_compatible(np, "hisilicon,hns-dsaf-v1"))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ if (acpi_dev_found(hns_dsaf_acpi_match[0].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_dsaf_acpi_match[1].id))
+ dsaf_dev->dsaf_ver = AE_VERSION_2;
+ else
+ return -ENXIO;
+ } else {
+ dev_err(dsaf_dev->dev, "cannot get cfg data from of or acpi\n");
+ return -ENXIO;
+ }
+
+ ret = device_property_read_string(dsaf_dev->dev, "mode", &mode_str);
+ if (ret) {
+ dev_err(dsaf_dev->dev, "get dsaf mode fail, ret=%d!\n", ret);
+ return ret;
+ }
+ for (i = 0; i < DSAF_MODE_MAX; i++) {
+ if (g_dsaf_mode_match[i] &&
+ !strcmp(mode_str, g_dsaf_mode_match[i]))
+ break;
+ }
+ if (i >= DSAF_MODE_MAX ||
+ i == DSAF_MODE_INVALID || i == DSAF_MODE_ENABLE) {
+ dev_err(dsaf_dev->dev,
+ "%s prs mode str fail!\n", dsaf_dev->ae_dev.name);
+ return -EINVAL;
+ }
+ dsaf_dev->dsaf_mode = (enum dsaf_mode)i;
+
+ if (dsaf_dev->dsaf_mode > DSAF_MODE_ENABLE)
+ dsaf_dev->dsaf_en = HRD_DSAF_NO_DSAF_MODE;
+ else
+ dsaf_dev->dsaf_en = HRD_DSAF_MODE;
+
+ if ((i == DSAF_MODE_ENABLE_16VM) ||
+ (i == DSAF_MODE_DISABLE_2PORT_8VM) ||
+ (i == DSAF_MODE_DISABLE_6PORT_2VM))
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_8TC_MODE;
+ else
+ dsaf_dev->dsaf_tc_mode = HRD_DSAF_4TC_MODE;
+
+ if (dev_of_node(dsaf_dev->dev)) {
+ np_temp = of_parse_phandle(np, "subctrl-syscon", 0);
+ syscon = syscon_node_to_regmap(np_temp);
+ of_node_put(np_temp);
+ if (IS_ERR_OR_NULL(syscon)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "subctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sc_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sc_base))
+ return PTR_ERR(dsaf_dev->sc_base);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "serdes-ctrl info is needed!\n");
+ return -ENOMEM;
+ }
+
+ dsaf_dev->sds_base = devm_ioremap_resource(&pdev->dev,
+ res);
+ if (IS_ERR(dsaf_dev->sds_base))
+ return PTR_ERR(dsaf_dev->sds_base);
+ } else {
+ dsaf_dev->sub_ctrl = syscon;
+ }
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ppe-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, res_idx++);
+ if (!res) {
+ dev_err(dsaf_dev->dev, "ppe-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->ppe_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsaf_dev->ppe_base))
+ return PTR_ERR(dsaf_dev->ppe_base);
+ dsaf_dev->ppe_paddr = res->start;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "dsaf-base");
+ if (!res) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ res_idx);
+ if (!res) {
+ dev_err(dsaf_dev->dev,
+ "dsaf-base info is needed!\n");
+ return -ENOMEM;
+ }
+ }
+ dsaf_dev->io_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dsaf_dev->io_base))
+ return PTR_ERR(dsaf_dev->io_base);
+ }
+
+ ret = device_property_read_u32(dsaf_dev->dev, "desc-num", &desc_num);
+ if (ret < 0 || desc_num < HNS_DSAF_MIN_DESC_CNT ||
+ desc_num > HNS_DSAF_MAX_DESC_CNT) {
+ dev_err(dsaf_dev->dev, "get desc-num(%d) fail, ret=%d!\n",
+ desc_num, ret);
+ return -EINVAL;
+ }
+ dsaf_dev->desc_num = desc_num;
+
+ ret = device_property_read_u32(dsaf_dev->dev, "reset-field-offset",
+ &reset_offset);
+ if (ret < 0) {
+ dev_dbg(dsaf_dev->dev,
+ "get reset-field-offset fail, ret=%d!\r\n", ret);
+ }
+ dsaf_dev->reset_offset = reset_offset;
+
+ ret = device_property_read_u32(dsaf_dev->dev, "buf-size", &buf_size);
+ if (ret < 0) {
+ dev_err(dsaf_dev->dev,
+ "get buf-size fail, ret=%d!\r\n", ret);
+ return ret;
+ }
+ dsaf_dev->buf_size = buf_size;
+
+ dsaf_dev->buf_size_type = hns_rcb_buf_size2type(buf_size);
+ if (dsaf_dev->buf_size_type < 0) {
+ dev_err(dsaf_dev->dev,
+ "buf_size(%d) is wrong!\n", buf_size);
+ return -EINVAL;
+ }
+
+ dsaf_dev->misc_op = hns_misc_op_get(dsaf_dev);
+ if (!dsaf_dev->misc_op)
+ return -ENOMEM;
+
+ if (!dma_set_mask_and_coherent(dsaf_dev->dev, DMA_BIT_MASK(64ULL)))
+ dev_dbg(dsaf_dev->dev, "set mask to 64bit\n");
+ else
+ dev_err(dsaf_dev->dev, "set mask to 64bit fail!\n");
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_sbm_link_sram_init_en - config dsaf_sbm_init_en
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_sbm_link_sram_init_en(struct dsaf_device *dsaf_dev)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG, DSAF_CFG_SBM_INIT_S, 1);
+}
+
+/**
+ * hns_dsaf_reg_cnt_clr_ce - config hns_dsaf_reg_cnt_clr_ce
+ * @dsaf_dev: dsa fabric id
+ * @reg_cnt_clr_ce: config value
+ */
+static void
+hns_dsaf_reg_cnt_clr_ce(struct dsaf_device *dsaf_dev, u32 reg_cnt_clr_ce)
+{
+ dsaf_set_dev_bit(dsaf_dev, DSAF_DSA_REG_CNT_CLR_CE_REG,
+ DSAF_CNT_CLR_CE_S, reg_cnt_clr_ce);
+}
+
+/**
+ * hns_dsaf_ppe_qid_cfg - config ppe qid
+ * @dsaf_dev: dsa fabric id
+ * @qid_cfg: qid value written for each channel
+ */
+static void
+hns_dsaf_ppe_qid_cfg(struct dsaf_device *dsaf_dev, u32 qid_cfg)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_PPE_QID_CFG_0_REG + 0x0004 * i,
+ DSAF_PPE_QID_CFG_M, DSAF_PPE_QID_CFG_S,
+ qid_cfg);
+ }
+}
+
+static void hns_dsaf_mix_def_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 i;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
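+ /* give each port a default qid at the start of its own queue block */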
+ for (i = 0, q_id = 0; i < DSAF_SERVICE_NW_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_MIX_DEF_QID_0_REG + 0x0004 * i,
+ 0xff, 0, q_id);
+ q_id += q_num_per_port;
+ }
+}
+
+static void hns_dsaf_inner_qid_cfg(struct dsaf_device *dsaf_dev)
+{
+ u16 max_q_per_vf, max_vfn;
+ u32 q_id, q_num_per_port;
+ u32 mac_id;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ return;
+
+ hns_rcb_get_queue_mode(dsaf_dev->dsaf_mode, &max_vfn, &max_q_per_vf);
+ q_num_per_port = max_vfn * max_q_per_vf;
+
+ for (mac_id = 0, q_id = 0; mac_id < DSAF_SERVICE_NW_NUM; mac_id++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAFV2_SERDES_LBK_0_REG + 4 * mac_id,
+ DSAFV2_SERDES_LBK_QID_M,
+ DSAFV2_SERDES_LBK_QID_S,
+ q_id);
+ q_id += q_num_per_port;
+ }
+}
+
+/**
+ * hns_dsaf_sw_port_type_cfg - cfg sw type
+ * @dsaf_dev: dsa fabric id
+ * @port_type: sw port type to set
+ */
+static void hns_dsaf_sw_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_sw_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_SW_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_SW_PORT_TYPE_M, DSAF_SW_PORT_TYPE_S,
+ port_type);
+ }
+}
+
+/**
+ * hns_dsaf_stp_port_type_cfg - cfg stp type
+ * @dsaf_dev: dsa fabric id
+ * @port_type: stp port type to set
+ */
+static void hns_dsaf_stp_port_type_cfg(struct dsaf_device *dsaf_dev,
+ enum dsaf_stp_port_type port_type)
+{
+ u32 i;
+
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ dsaf_set_dev_field(dsaf_dev,
+ DSAF_STP_PORT_TYPE_0_REG + 0x0004 * i,
+ DSAF_STP_PORT_TYPE_M, DSAF_STP_PORT_TYPE_S,
+ port_type);
+ }
+}
+
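+/* the number of sbm channels differs between dsaf v1 and v2 hardware,
+ * so loops over the sbms use this helper to pick the right count
+ */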
+#define HNS_DSAF_SBM_NUM(dev) \
+ (AE_IS_VER1((dev)->dsaf_ver) ? DSAF_SBM_NUM : DSAFV2_SBM_NUM)
+/**
+ * hns_dsaf_sbm_cfg - config sbm
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_sbm_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_cfg;
+ u32 i;
+
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ o_sbm_cfg = dsaf_read_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_EN_S, 1);
+ dsaf_set_bit(o_sbm_cfg, DSAF_SBM_CFG_SHCUT_EN_S, 0);
+ dsaf_write_dev(dsaf_dev,
+ DSAF_SBM_CFG_REG_0_REG + 0x80 * i, o_sbm_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_cfg_mib_en - enable the sbm mib counters
+ * @dsaf_dev: dsa fabric id
+ */
+static int hns_dsaf_sbm_cfg_mib_en(struct dsaf_device *dsaf_dev)
+{
+ u32 sbm_cfg_mib_en;
+ u32 i;
+ u32 reg;
+ u32 read_cnt;
+
+ /* apply the configuration by toggling SBM_CFG_MIB_EN from 0 to 1. */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 0);
+ }
+
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ dsaf_set_dev_bit(dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S, 1);
+ }
+
+ /* wait for all sbm enables to finish */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(dsaf_dev); i++) {
+ read_cnt = 0;
+ reg = DSAF_SBM_CFG_REG_0_REG + 0x80 * i;
+ do {
+ udelay(1);
+ sbm_cfg_mib_en = dsaf_get_dev_bit(
+ dsaf_dev, reg, DSAF_SBM_CFG_MIB_EN_S);
+ read_cnt++;
+ } while (sbm_cfg_mib_en == 0 &&
+ read_cnt < DSAF_CFG_READ_CNT);
+
+ if (sbm_cfg_mib_en == 0) {
+ dev_err(dsaf_dev->dev,
+ "sbm_cfg_mib_en fail,%s,sbm_num=%d\n",
+ dsaf_dev->ae_dev.name, i);
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_sbm_bp_wl_cfg - config the sbm back pressure waterlines
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_bp_cfg;
+ u32 reg;
+ u32 i;
+
+ /* XGE */
+ for (i = 0; i < DSAF_XGE_NUM; i++) {
+ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S, 512);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+ DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 104);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 128);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 110);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 160);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ /* thresholds used when pfc is not enabled */
+ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 128);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 192);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* PPE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 10);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 12);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* RoCEE */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_SET_BUF_NUM_M,
+ DSAF_SBM_CFG2_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg, DSAF_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAF_SBM_CFG2_RESET_BUF_NUM_S, 4);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+}
+
+static void hns_dsafv2_sbm_bp_wl_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 o_sbm_bp_cfg;
+ u32 reg;
+ u32 i;
+
+ /* XGE */
+ for (i = 0; i < DSAFV2_SBM_XGE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S, 256);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_1_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S, 0);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M,
+ DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S, 0);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_SET_BUF_NUM_S, 104);
+ dsaf_set_field(o_sbm_bp_cfg, DSAFV2_SBM_CFG2_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_RESET_BUF_NUM_S, 128);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ reg = DSAF_SBM_BP_CFG_3_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S, 55);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S, 110);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+
+ /* thresholds used when pfc is not enabled */
+ reg = DSAF_SBM_BP_CFG_4_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S, 128);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M,
+ DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S, 192);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* PPE */
+ for (i = 0; i < DSAFV2_SBM_PPE_CHN; i++) {
+ reg = DSAF_SBM_BP_CFG_2_PPE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S, 3);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M,
+ DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S, 52);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+
+ /* RoCEE */
+ for (i = 0; i < DASFV2_ROCEE_CRD_NUM; i++) {
+ reg = DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG + 0x80 * i;
+ o_sbm_bp_cfg = dsaf_read_dev(dsaf_dev, reg);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S, 2);
+ dsaf_set_field(o_sbm_bp_cfg,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M,
+ DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S, 4);
+ dsaf_write_dev(dsaf_dev, reg, o_sbm_bp_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_voq_bp_all_thrd_cfg - config the voq back pressure thresholds
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_voq_bp_all_thrd_cfg(struct dsaf_device *dsaf_dev)
+{
+ u32 voq_bp_all_thrd;
+ u32 i;
+
+ for (i = 0; i < DSAF_VOQ_NUM; i++) {
+ voq_bp_all_thrd = dsaf_read_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i);
+ if (i < DSAF_XGE_NUM) {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 930);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 950);
+ } else {
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_M,
+ DSAF_VOQ_BP_ALL_DOWNTHRD_S, 220);
+ dsaf_set_field(voq_bp_all_thrd,
+ DSAF_VOQ_BP_ALL_UPTHRD_M,
+ DSAF_VOQ_BP_ALL_UPTHRD_S, 230);
+ }
+ dsaf_write_dev(
+ dsaf_dev, DSAF_VOQ_BP_ALL_THRD_0_REG + 0x40 * i,
+ voq_bp_all_thrd);
+ }
+}
+
+static void hns_dsaf_tbl_tcam_match_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_L_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MATCH_CFG_H_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_cfg - write the tcam data registers
+ * @dsaf_dev: dsa fabric id
+ * @ptbl_tcam_data: tcam data to write
+ */
+static void hns_dsaf_tbl_tcam_data_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_LOW_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_low);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_HIGH_0_REG,
+ ptbl_tcam_data->tbl_tcam_data_high);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_mcast_cfg - write the tcam mcast config registers
+ * @dsaf_dev: dsa fabric id
+ * @mcast: mcast config to write
+ */
+static void hns_dsaf_tbl_tcam_mcast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_mcast_cfg *mcast)
+{
+ u32 mcast_cfg4;
+
+ mcast_cfg4 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S,
+ mcast->tbl_mcast_item_vld);
+ dsaf_set_bit(mcast_cfg4, DSAF_TBL_MCAST_CFG4_OLD_EN_S,
+ mcast->tbl_mcast_old_en);
+ dsaf_set_field(mcast_cfg4, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S,
+ mcast->tbl_mcast_port_msk[4]);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, mcast_cfg4);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG,
+ mcast->tbl_mcast_port_msk[3]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG,
+ mcast->tbl_mcast_port_msk[2]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG,
+ mcast->tbl_mcast_port_msk[1]);
+
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG,
+ mcast->tbl_mcast_port_msk[0]);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_ucast_cfg - write the tcam ucast config register
+ * @dsaf_dev: dsa fabric id
+ * @tbl_tcam_ucast: ucast config to write
+ */
+static void hns_dsaf_tbl_tcam_ucast_cfg(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_tcam_ucast_cfg *tbl_tcam_ucast)
+{
+ u32 ucast_cfg1;
+
+ ucast_cfg1 = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S,
+ tbl_tcam_ucast->tbl_ucast_mac_discard);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_ITEM_VLD_S,
+ tbl_tcam_ucast->tbl_ucast_item_vld);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OLD_EN_S,
+ tbl_tcam_ucast->tbl_ucast_old_en);
+ dsaf_set_bit(ucast_cfg1, DSAF_TBL_UCAST_CFG1_DVC_S,
+ tbl_tcam_ucast->tbl_ucast_dvc);
+ dsaf_set_field(ucast_cfg1, DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S,
+ tbl_tcam_ucast->tbl_ucast_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_UCAST_CFG_0_REG, ucast_cfg1);
+}
+
+/**
+ * hns_dsaf_tbl_line_cfg - write the line table config register
+ * @dsaf_dev: dsa fabric id
+ * @tbl_lin: line config to write
+ */
+static void hns_dsaf_tbl_line_cfg(struct dsaf_device *dsaf_dev,
+ struct dsaf_tbl_line_cfg *tbl_lin)
+{
+ u32 tbl_line;
+
+ tbl_line = dsaf_read_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_MAC_DISCARD_S,
+ tbl_lin->tbl_line_mac_discard);
+ dsaf_set_bit(tbl_line, DSAF_TBL_LINE_CFG_DVC_S,
+ tbl_lin->tbl_line_dvc);
+ dsaf_set_field(tbl_line, DSAF_TBL_LINE_CFG_OUT_PORT_M,
+ DSAF_TBL_LINE_CFG_OUT_PORT_S,
+ tbl_lin->tbl_line_out_port);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_LIN_CFG_0_REG, tbl_line);
+}
+
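+/* the *_pul helpers below toggle the relevant valid bits in the table
+ * pulse register from 1 back to 0, which commits the data staged in the
+ * tcam/line configuration registers into the selected table entry
+ */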
+/**
+ * hns_dsaf_tbl_tcam_mcast_pul - pulse the mcast valid bit to commit the entry
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_mcast_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_line_pul - pulse the line valid bit to commit the entry
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_line_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 tbl_pul;
+
+ tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+ dsaf_set_bit(tbl_pul, DSAF_TBL_PUL_LINE_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_mcast_pul - pulse the tcam data and mcast valid bits
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_data_mcast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_MCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_data_ucast_pul - pulse the tcam data and ucast valid bits
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_data_ucast_pul(
+ struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 1);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_DATA_VLD_S, 0);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_UCAST_VLD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver) && !HNS_DSAF_IS_DEBUG(dsaf_dev))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_CFG_0_REG,
+ DSAF_CFG_MIX_MODE_S, !!en);
+}
+
+/**
+ * hns_dsaf_tbl_stat_en - enable table lookup statistics
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_stat_en(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_ctrl;
+
+ o_tbl_ctrl = dsaf_read_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_UC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_MC_LKUP_NUM_EN_S, 1);
+ dsaf_set_bit(o_tbl_ctrl, DSAF_TBL_DFX_BC_LKUP_NUM_EN_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_DFX_CTRL_0_REG, o_tbl_ctrl);
+}
+
+/**
+ * hns_dsaf_rocee_bp_en - enable rocee back pressure
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_rocee_bp_en(struct dsaf_device *dsaf_dev)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_set_dev_bit(dsaf_dev, DSAF_XGE_CTRL_SIG_CFG_0_REG,
+ DSAF_FC_XGE_TX_PAUSE_S, 1);
+}
+
+/* set msk for dsaf exception irq*/
+static void hns_dsaf_int_xge_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 mask_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_MSK_0_REG + 0x4 * chnn_num, mask_set);
+}
+
+static void hns_dsaf_int_ppe_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_MSK_0_REG + 0x4 * chnn_num, msk_set);
+}
+
+static void hns_dsaf_int_rocee_msk_set(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_MSK_0_REG + 0x4 * chnn, msk_set);
+}
+
+static void
+hns_dsaf_int_tbl_msk_set(struct dsaf_device *dsaf_dev, u32 msk_set)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_MSK_0_REG, msk_set);
+}
+
+/* clr dsaf exception irq*/
+static void hns_dsaf_int_xge_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn_num, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_INT_SRC_0_REG + 0x4 * chnn_num, int_src);
+}
+
+static void hns_dsaf_int_ppe_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_PPE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_rocee_src_clr(struct dsaf_device *dsaf_dev,
+ u32 chnn, u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev,
+ DSAF_ROCEE_INT_SRC_0_REG + 0x4 * chnn, int_src);
+}
+
+static void hns_dsaf_int_tbl_src_clr(struct dsaf_device *dsaf_dev,
+ u32 int_src)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_INT_SRC_0_REG, int_src);
+}
+
+/**
+ * hns_dsaf_single_line_tbl_cfg - write a single line table entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_line: the line
+ */
+static void hns_dsaf_single_line_tbl_cfg(
+ struct dsaf_device *dsaf_dev,
+ u32 address, struct dsaf_tbl_line_cfg *ptbl_line)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_line_addr_cfg(dsaf_dev, address);
+
+ /*Write Line*/
+ hns_dsaf_tbl_line_cfg(dsaf_dev, ptbl_line);
+
+ /* write pulse */
+ hns_dsaf_tbl_line_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg - write a tcam unicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
+ */
+static void hns_dsaf_tcam_uc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Ucast*/
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, ptbl_tcam_ucast);
+ /* write pulse */
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg - cfg the tcam for mc
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: tcam index
+ * @ptbl_tcam_data: tcam data struct pointer
+ * @ptbl_tcam_mask: tcam mask struct pointer, it must be null for HNSv1
+ * @ptbl_tcam_mcast: tcam data struct pointer
+ */
+static void hns_dsaf_tcam_mc_cfg(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_mask,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ /*Write Tcam Data*/
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, ptbl_tcam_data);
+ /*Write Tcam Mcast*/
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, ptbl_tcam_mcast);
+ /* Write Match Data */
+ if (ptbl_tcam_mask)
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, ptbl_tcam_mask);
+
+ /* write pulse */
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_uc_cfg_vague - write a tcam unicast entry with a match mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_uc: the unicast data
+ */
+static void hns_dsaf_tcam_uc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_ucast_cfg *tcam_uc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_ucast_cfg(dsaf_dev, tcam_uc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_ucast_pul(dsaf_dev);
+
+ /* restore the global match mask to all ones (exact match) */
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_cfg_vague - write a tcam multicast entry with a match mask
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @address: the address
+ * @tcam_data: the data
+ * @tcam_mask: the mask
+ * @tcam_mc: the multicast data
+ */
+static void hns_dsaf_tcam_mc_cfg_vague(struct dsaf_device *dsaf_dev,
+ u32 address,
+ struct dsaf_tbl_tcam_data *tcam_data,
+ struct dsaf_tbl_tcam_data *tcam_mask,
+ struct dsaf_tbl_tcam_mcast_cfg *tcam_mc)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+ hns_dsaf_tbl_tcam_data_cfg(dsaf_dev, tcam_data);
+ hns_dsaf_tbl_tcam_mcast_cfg(dsaf_dev, tcam_mc);
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+ hns_dsaf_tbl_tcam_data_mcast_pul(dsaf_dev);
+
+ /* restore the global match mask to all ones (exact match) */
+ tcam_mask->tbl_tcam_data_high = 0xffffffff;
+ tcam_mask->tbl_tcam_data_low = 0xffffffff;
+ hns_dsaf_tbl_tcam_match_cfg(dsaf_dev, tcam_mask);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_invld - invalidate a tcam multicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ */
+static void hns_dsaf_tcam_mc_invld(struct dsaf_device *dsaf_dev, u32 address)
+{
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /*write tcam mcast*/
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG, 0);
+
+ /* write pulse */
+ hns_dsaf_tbl_tcam_mcast_pul(dsaf_dev);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+static void
+hns_dsaf_tcam_addr_get(struct dsaf_drv_tbl_tcam_key *mac_key, u8 *addr)
+{
+ addr[0] = mac_key->high.bits.mac_0;
+ addr[1] = mac_key->high.bits.mac_1;
+ addr[2] = mac_key->high.bits.mac_2;
+ addr[3] = mac_key->high.bits.mac_3;
+ addr[4] = mac_key->low.bits.mac_4;
+ addr[5] = mac_key->low.bits.mac_5;
+}
+
+/**
+ * hns_dsaf_tcam_uc_get - read a tcam unicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_ucast: unicast
+ */
+static void hns_dsaf_tcam_uc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_ucast_cfg *ptbl_tcam_ucast)
+{
+ u32 tcam_read_data0;
+ u32 tcam_read_data4;
+
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /* pulse to load the tcam item into the read-data registers */
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low
+ = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+
+ /*read tcam mcast*/
+ tcam_read_data0 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ tcam_read_data4 = dsaf_read_dev(dsaf_dev,
+ DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+
+ ptbl_tcam_ucast->tbl_ucast_item_vld
+ = dsaf_get_bit(tcam_read_data4,
+ DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_ucast->tbl_ucast_old_en
+ = dsaf_get_bit(tcam_read_data4, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_ucast->tbl_ucast_mac_discard
+ = dsaf_get_bit(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S);
+ ptbl_tcam_ucast->tbl_ucast_out_port
+ = dsaf_get_field(tcam_read_data0,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_M,
+ DSAF_TBL_UCAST_CFG1_OUT_PORT_S);
+ ptbl_tcam_ucast->tbl_ucast_dvc
+ = dsaf_get_bit(tcam_read_data0, DSAF_TBL_UCAST_CFG1_DVC_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tcam_mc_get - read a tcam multicast entry
+ * @dsaf_dev: dsa fabric id
+ * @address: the address
+ * @ptbl_tcam_data: the data
+ * @ptbl_tcam_mcast: tcam multicast data
+ */
+static void hns_dsaf_tcam_mc_get(
+ struct dsaf_device *dsaf_dev, u32 address,
+ struct dsaf_tbl_tcam_data *ptbl_tcam_data,
+ struct dsaf_tbl_tcam_mcast_cfg *ptbl_tcam_mcast)
+{
+ u32 data_tmp;
+
+ spin_lock_bh(&dsaf_dev->tcam_lock);
+
+ /*Write Addr*/
+ hns_dsaf_tbl_tcam_addr_cfg(dsaf_dev, address);
+
+ /* pulse to load the tcam item into the read-data registers */
+ hns_dsaf_tbl_tcam_load_pul(dsaf_dev);
+
+ /*read tcam data*/
+ ptbl_tcam_data->tbl_tcam_data_high =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ ptbl_tcam_data->tbl_tcam_data_low =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+
+ /*read tcam mcast*/
+ ptbl_tcam_mcast->tbl_mcast_port_msk[0] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[1] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[2] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[3] =
+ dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+
+ data_tmp = dsaf_read_dev(dsaf_dev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ ptbl_tcam_mcast->tbl_mcast_item_vld =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_ITEM_VLD_S);
+ ptbl_tcam_mcast->tbl_mcast_old_en =
+ dsaf_get_bit(data_tmp, DSAF_TBL_MCAST_CFG4_OLD_EN_S);
+ ptbl_tcam_mcast->tbl_mcast_port_msk[4] =
+ dsaf_get_field(data_tmp, DSAF_TBL_MCAST_CFG4_VM128_112_M,
+ DSAF_TBL_MCAST_CFG4_VM128_112_S);
+
+ spin_unlock_bh(&dsaf_dev->tcam_lock);
+}
+
+/**
+ * hns_dsaf_tbl_line_init - init the line table
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_line_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ /* by default, set every line table entry to discard */
+ struct dsaf_tbl_line_cfg tbl_line[] = {{1, 0, 0} };
+
+ for (i = 0; i < DSAF_LINE_SUM; i++)
+ hns_dsaf_single_line_tbl_cfg(dsaf_dev, i, tbl_line);
+}
+
+/**
+ * hns_dsaf_tbl_tcam_init - init the tcam table
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_tcam_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ struct dsaf_tbl_tcam_data tcam_data[] = {{0, 0} };
+ struct dsaf_tbl_tcam_ucast_cfg tcam_ucast[] = {{0, 0, 0, 0, 0} };
+
+ /*tcam tbl*/
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, i, tcam_data, tcam_ucast);
+}
+
+/**
+ * hns_dsaf_pfc_en_cfg - dsaf pfc pause cfg
+ * @dsaf_dev: dsa fabric id
+ * @mac_id: mac (port) id
+ * @tc_en: pfc enable value for the port
+ */
+static void hns_dsaf_pfc_en_cfg(struct dsaf_device *dsaf_dev,
+ int mac_id, int tc_en)
+{
+ dsaf_write_dev(dsaf_dev, DSAF_PFC_EN_0_REG + mac_id * 4, tc_en);
+}
+
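+/* dsaf v1 hardware cannot disable pfc pause, so the helpers below refuse
+ * to turn it off on v1 and only program the per-port rx/tx enable bits
+ * on v2
+ */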
+static void hns_dsaf_set_pfc_pause(struct dsaf_device *dsaf_dev,
+ int mac_id, int tx_en, int rx_en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!tx_en || !rx_en)
+ dev_err(dsaf_dev->dev, "dsaf v1 can not close pfc!\n");
+
+ return;
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_RX_EN_B, !!rx_en);
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_PFC_PAUSE_TX_EN_B, !!tx_en);
+}
+
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ if (!en) {
+ dev_err(dsaf_dev->dev, "dsafv1 can't close rx_pause!\n");
+ return -EINVAL;
+ }
+ }
+
+ dsaf_set_dev_bit(dsaf_dev, DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B, !!en);
+
+ return 0;
+}
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en)
+{
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ *en = 1;
+ else
+ *en = dsaf_get_dev_bit(dsaf_dev,
+ DSAF_PAUSE_CFG_REG + mac_id * 4,
+ DSAF_MAC_PAUSE_RX_EN_B);
+}
+
+/**
+ * hns_dsaf_comm_init - init the common dsaf configuration
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_comm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+ u32 o_dsaf_cfg;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+
+ o_dsaf_cfg = dsaf_read_dev(dsaf_dev, DSAF_CFG_0_REG);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_EN_S, dsaf_dev->dsaf_en);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_TC_MODE_S, dsaf_dev->dsaf_tc_mode);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_CRC_EN_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_MIX_MODE_S, 0);
+ dsaf_set_bit(o_dsaf_cfg, DSAF_CFG_LOCA_ADDR_EN_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_CFG_0_REG, o_dsaf_cfg);
+
+ hns_dsaf_reg_cnt_clr_ce(dsaf_dev, 1);
+ hns_dsaf_stp_port_type_cfg(dsaf_dev, DSAF_STP_PORT_TYPE_FORWARD);
+
+ /* set 22 queue per tx ppe engine, only used in switch mode */
+ hns_dsaf_ppe_qid_cfg(dsaf_dev, DSAF_DEFAUTL_QUEUE_NUM_PER_PPE);
+
+ /* set promisc def queue id */
+ hns_dsaf_mix_def_qid_cfg(dsaf_dev);
+
+ /* set inner loopback queue id */
+ hns_dsaf_inner_qid_cfg(dsaf_dev);
+
+ /* in non switch mode, set all port to access mode */
+ hns_dsaf_sw_port_type_cfg(dsaf_dev, DSAF_SW_PORT_TYPE_NON_VLAN);
+
+ /* set dsaf pfc to 0 for parsing rx pause frames */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ hns_dsaf_pfc_en_cfg(dsaf_dev, i, 0);
+ hns_dsaf_set_pfc_pause(dsaf_dev, i, is_ver1, is_ver1);
+ }
+
+ /*msk and clr exception irqs */
+ for (i = 0; i < DSAF_COMM_CHN; i++) {
+ hns_dsaf_int_xge_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_src_clr(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_src_clr(dsaf_dev, i, 0xfffffffful);
+
+ hns_dsaf_int_xge_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_ppe_msk_set(dsaf_dev, i, 0xfffffffful);
+ hns_dsaf_int_rocee_msk_set(dsaf_dev, i, 0xfffffffful);
+ }
+ hns_dsaf_int_tbl_src_clr(dsaf_dev, 0xfffffffful);
+ hns_dsaf_int_tbl_msk_set(dsaf_dev, 0xfffffffful);
+}
+
+/**
+ * hns_dsaf_inode_init - init the inodes
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_inode_init(struct dsaf_device *dsaf_dev)
+{
+ u32 reg;
+ u32 tc_cfg;
+ u32 i;
+
+ if (dsaf_dev->dsaf_tc_mode == HRD_DSAF_4TC_MODE)
+ tc_cfg = HNS_DSAF_I4TC_CFG;
+ else
+ tc_cfg = HNS_DSAF_I8TC_CFG;
+
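+ /* v1 programs one input-port field per inode register; v2 packs six
+ * port-number fields into each of the first DSAF_PORT_TYPE_NUM regs
+ */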
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ for (i = 0; i < DSAF_INODE_NUM; i++) {
+ reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAF_INODE_IN_PORT_NUM_M,
+ DSAF_INODE_IN_PORT_NUM_S,
+ i % DSAF_XGE_NUM);
+ }
+ } else {
+ for (i = 0; i < DSAF_PORT_TYPE_NUM; i++) {
+ reg = DSAF_INODE_IN_PORT_NUM_0_REG + 0x80 * i;
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAF_INODE_IN_PORT_NUM_M,
+ DSAF_INODE_IN_PORT_NUM_S, 0);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT1_NUM_M,
+ DSAFV2_INODE_IN_PORT1_NUM_S, 1);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT2_NUM_M,
+ DSAFV2_INODE_IN_PORT2_NUM_S, 2);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT3_NUM_M,
+ DSAFV2_INODE_IN_PORT3_NUM_S, 3);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT4_NUM_M,
+ DSAFV2_INODE_IN_PORT4_NUM_S, 4);
+ dsaf_set_dev_field(dsaf_dev, reg,
+ DSAFV2_INODE_IN_PORT5_NUM_M,
+ DSAFV2_INODE_IN_PORT5_NUM_S, 5);
+ }
+ }
+ for (i = 0; i < DSAF_INODE_NUM; i++) {
+ reg = DSAF_INODE_PRI_TC_CFG_0_REG + 0x80 * i;
+ dsaf_write_dev(dsaf_dev, reg, tc_cfg);
+ }
+}
+
+/**
+ * hns_dsaf_sbm_init - init the sbm
+ * @dsaf_dev: dsa fabric id
+ */
+static int hns_dsaf_sbm_init(struct dsaf_device *dsaf_dev)
+{
+ u32 flag;
+ u32 finish_msk;
+ u32 cnt = 0;
+ int ret;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ hns_dsaf_sbm_bp_wl_cfg(dsaf_dev);
+ finish_msk = DSAF_SRAM_INIT_OVER_M;
+ } else {
+ hns_dsafv2_sbm_bp_wl_cfg(dsaf_dev);
+ finish_msk = DSAFV2_SRAM_INIT_OVER_M;
+ }
+
+ /* enable the sbm channels, disable the sbm channel shortcut function */
+ hns_dsaf_sbm_cfg(dsaf_dev);
+
+ /* enable sbm mib */
+ ret = hns_dsaf_sbm_cfg_mib_en(dsaf_dev);
+ if (ret) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_cfg_mib_en fail,%s, ret=%d\n",
+ dsaf_dev->ae_dev.name, ret);
+ return ret;
+ }
+
+ /* enable sbm initial link sram */
+ hns_dsaf_sbm_link_sram_init_en(dsaf_dev);
+
+ do {
+ usleep_range(200, 210);/*udelay(200);*/
+ flag = dsaf_get_dev_field(dsaf_dev, DSAF_SRAM_INIT_OVER_0_REG,
+ finish_msk, DSAF_SRAM_INIT_OVER_S);
+ cnt++;
+ } while (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S) &&
+ cnt < DSAF_CFG_READ_CNT);
+
+ if (flag != (finish_msk >> DSAF_SRAM_INIT_OVER_S)) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_sbm_init fail %s, flag=%d, cnt=%d\n",
+ dsaf_dev->ae_dev.name, flag, cnt);
+ return -ENODEV;
+ }
+
+ hns_dsaf_rocee_bp_en(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_tbl_init - init the tables
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_tbl_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_tbl_stat_en(dsaf_dev);
+
+ hns_dsaf_tbl_tcam_init(dsaf_dev);
+ hns_dsaf_tbl_line_init(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_voq_init - init the voq
+ * @dsaf_dev: dsa fabric id
+ */
+static void hns_dsaf_voq_init(struct dsaf_device *dsaf_dev)
+{
+ hns_dsaf_voq_bp_all_thrd_cfg(dsaf_dev);
+}
+
+/**
+ * hns_dsaf_init_hw - init dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static int hns_dsaf_init_hw(struct dsaf_device *dsaf_dev)
+{
+ int ret;
+
+ dev_dbg(dsaf_dev->dev,
+ "hns_dsaf_init_hw begin %s !\n", dsaf_dev->ae_dev.name);
+
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
+ mdelay(10);
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 1);
+
+ hns_dsaf_comm_init(dsaf_dev);
+
+ /*init XBAR_INODE*/
+ hns_dsaf_inode_init(dsaf_dev);
+
+ /*init SBM*/
+ ret = hns_dsaf_sbm_init(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /*init TBL*/
+ hns_dsaf_tbl_init(dsaf_dev);
+
+ /*init VOQ*/
+ hns_dsaf_voq_init(dsaf_dev);
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_remove_hw - uninit dsa fabric hardware
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_remove_hw(struct dsaf_device *dsaf_dev)
+{
+ /*reset*/
+ dsaf_dev->misc_op->dsaf_reset(dsaf_dev, 0);
+}
+
+/**
+ * hns_dsaf_init - init dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_init(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ u32 i;
+ int ret;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver))
+ dsaf_dev->tcam_max_num = DSAF_TCAM_SUM;
+ else
+ dsaf_dev->tcam_max_num =
+ DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM;
+
+ spin_lock_init(&dsaf_dev->tcam_lock);
+ ret = hns_dsaf_init_hw(dsaf_dev);
+ if (ret)
+ return ret;
+
+ /* malloc mem for tcam mac key(vlan+mac) */
+ priv->soft_mac_tbl = vzalloc(array_size(DSAF_TCAM_SUM,
+ sizeof(*priv->soft_mac_tbl)));
+ if (!priv->soft_mac_tbl) {
+ ret = -ENOMEM;
+ goto remove_hw;
+ }
+
+ /* mark all entries invalid */
+ for (i = 0; i < DSAF_TCAM_SUM; i++)
+ (priv->soft_mac_tbl + i)->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+
+remove_hw:
+ hns_dsaf_remove_hw(dsaf_dev);
+ return ret;
+}
+
+/**
+ * hns_dsaf_free - free dsa fabric
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+
+ hns_dsaf_remove_hw(dsaf_dev);
+
+ /* free all mac mem */
+ vfree(priv->soft_mac_tbl);
+ priv->soft_mac_tbl = NULL;
+}
+
+/**
+ * hns_dsaf_find_soft_mac_entry - find dsa fabric soft entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: mac entry struct pointer
+ */
+static u16 hns_dsaf_find_soft_mac_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
+ /* look for a valid entry whose key matches */
+ if ((soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX) &&
+ (soft_mac_entry->tcam_key.high.val == mac_key->high.val) &&
+ (soft_mac_entry->tcam_key.low.val == mac_key->low.val))
+ /* return find result --soft index */
+ return soft_mac_entry->index;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry - search dsa fabric soft empty-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u32 i;
+
+ soft_mac_entry = priv->soft_mac_tbl;
+ for (i = 0; i < dsaf_dev->tcam_max_num; i++) {
+ /* return the first invalid (free) entry */
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ /* return find result --soft index */
+ return i;
+
+ soft_mac_entry++;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_find_empty_mac_entry_reverse - search dsa fabric soft empty-entry from the end
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static u16 hns_dsaf_find_empty_mac_entry_reverse(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ int i;
+
+ soft_mac_entry = priv->soft_mac_tbl + (DSAF_TCAM_SUM - 1);
+ for (i = (DSAF_TCAM_SUM - 1); i > 0; i--) {
+ /* search all entry from end to start.*/
+ if (soft_mac_entry->index == DSAF_INVALID_ENTRY_IDX)
+ return i;
+ soft_mac_entry--;
+ }
+ return DSAF_INVALID_ENTRY_IDX;
+}
+
+/**
+ * hns_dsaf_set_mac_key - set mac key
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_key: tcam key pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+static void hns_dsaf_set_mac_key(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_tbl_tcam_key *mac_key, u16 vlan_id, u8 in_port_num,
+ u8 *addr)
+{
+ u8 port;
+
+ if (dsaf_dev->dsaf_mode <= DSAF_MODE_ENABLE)
+ /*DSAF mode : in port id fixed 0*/
+ port = 0;
+ else
+ /*non-dsaf mode*/
+ port = in_port_num;
+
+ mac_key->high.bits.mac_0 = addr[0];
+ mac_key->high.bits.mac_1 = addr[1];
+ mac_key->high.bits.mac_2 = addr[2];
+ mac_key->high.bits.mac_3 = addr[3];
+ mac_key->low.bits.mac_4 = addr[4];
+ mac_key->low.bits.mac_5 = addr[5];
+ mac_key->low.bits.port_vlan = 0;
+ dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_VLAN_M,
+ DSAF_TBL_TCAM_KEY_VLAN_S, vlan_id);
+ dsaf_set_field(mac_key->low.bits.port_vlan, DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S, port);
+}
+
+/**
+ * hns_dsaf_set_mac_uc_entry - set mac uc-entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: uc-mac entry
+ */
+int hns_dsaf_set_mac_uc_entry(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_tbl_tcam_data tcam_data;
+
+ /* mac addr check */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr) ||
+ MAC_IS_BROADCAST(mac_entry->addr) ||
+ MAC_IS_MULTICAST(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "set_uc %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ /* does the entry already exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* if there is no matching entry, find an empty one */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* no empty entry, return an error */
+ dev_err(dsaf_dev->dev,
+ "set_uc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "set_uc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* config hardware entry */
+ mac_data.tbl_ucast_item_vld = 1;
+ mac_data.tbl_ucast_mac_discard = 0;
+ mac_data.tbl_ucast_old_en = 0;
+ /* default config dvc to 0 */
+ mac_data.tbl_ucast_dvc = 0;
+ mac_data.tbl_ucast_out_port = mac_entry->port_num;
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
+
+ hns_dsaf_tcam_uc_cfg(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+ /* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+int hns_dsaf_rm_mac_addr(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_tbl_tcam_ucast_cfg mac_data;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+
+ /* mac addr check */
+ if (!is_valid_ether_addr(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "rm_uc_addr %s Mac %pM err!\n",
+ dsaf_dev->ae_dev.name, mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mac_entry->addr);
+
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ /* can not find the tcam entry, return 0 */
+ dev_info(dsaf_dev->dev,
+ "rm_uc_addr no tcam, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return 0;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "rm_uc_addr, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ hns_dsaf_tcam_uc_get(
+ dsaf_dev, entry_index,
+ (struct dsaf_tbl_tcam_data *)&mac_key,
+ &mac_data);
+
+ /* do not clear a unicast entry that is not used by this port */
+ if (mac_entry->port_num != mac_data.tbl_ucast_out_port)
+ return -EFAULT;
+
+ return hns_dsaf_del_mac_entry(dsaf_dev,
+ mac_entry->in_vlan_id,
+ mac_entry->in_port_num,
+ mac_entry->addr);
+}
+
+static void hns_dsaf_setup_mc_mask(struct dsaf_device *dsaf_dev,
+ u8 port_num, u8 *mask, u8 *addr)
+{
+ if (MAC_IS_BROADCAST(addr))
+ eth_broadcast_addr(mask);
+ else
+ memcpy(mask, dsaf_dev->mac_cb[port_num]->mc_mask, ETH_ALEN);
+}
+
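+/* AND the 6-byte multicast address in dst with the mask in src,
+ * 16 bits at a time, clearing address bits the mask does not cover
+ */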
+static void hns_dsaf_mc_mask_bit_clear(char *dst, const char *src)
+{
+ u16 *a = (u16 *)dst;
+ const u16 *b = (const u16 *)src;
+
+ a[0] &= b[0];
+ a[1] &= b[1];
+ a[2] &= b[2];
+}
+
+/**
+ * hns_dsaf_add_mac_mc_port - add mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mc-mac entry
+ */
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_tbl_tcam_key mask_key;
+ struct dsaf_tbl_tcam_data *pmask_key = NULL;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ struct dsaf_tbl_tcam_data tcam_data;
+ u8 mc_addr[ETH_ALEN];
+ int mskid;
+
+	/* check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "set_entry failed,addr %pM!\n",
+ mac_entry->addr);
+ return -EINVAL;
+ }
+
+ ether_addr_copy(mc_addr, mac_entry->addr);
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
+ /* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
+ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
+
+ /* config key mask */
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key,
+ 0x0,
+ 0xff,
+ mc_mask);
+
+ pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
+ }
+
+	/* config key */
+ hns_dsaf_set_mac_key(
+ dsaf_dev, &mac_key, mac_entry->in_vlan_id,
+ mac_entry->in_port_num, mc_addr);
+
+ memset(&mac_data, 0, sizeof(struct dsaf_tbl_tcam_mcast_cfg));
+
+	/* check if the tcam entry exists */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* if not, find an empty entry */
+ entry_index = hns_dsaf_find_empty_mac_entry(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+			/* no empty entry, error */
+ dev_err(dsaf_dev->dev,
+				"set_mc_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val);
+ return -EINVAL;
+ }
+ } else {
+		/* entry exists, read it so the new port can be added */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data,
+ &mac_data);
+ }
+
+ /* config hardware entry */
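+	/* service network ports use their port number as the mask bit index;
+	 * inner ports (>= DSAF_BASE_INNER_PORT_NUM) are packed right after them
+	 */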
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 1);
+ mac_data.tbl_mcast_old_en = 0;
+ mac_data.tbl_mcast_item_vld = 1;
+
+ dev_dbg(dsaf_dev->dev,
+		"set_mc_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
+
+ /* config mc entry with mask */
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index, &tcam_data,
+ pmask_key, &mac_data);
+
+	/* config software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_entry - del mac entry
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @vlan_id: vlan id
+ * @in_port_num: input port num
+ * @addr: mac addr
+ */
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv =
+ (struct dsaf_drv_priv *)hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+
+	/* check mac addr */
+ if (MAC_IS_ALL_ZEROS(addr) || MAC_IS_BROADCAST(addr)) {
+ dev_err(dsaf_dev->dev, "del_entry failed,addr %pM!\n",
+ addr);
+ return -EINVAL;
+ }
+
+	/* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, addr);
+
+	/* does the entry exist? */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* entry does not exist, error */
+ dev_err(dsaf_dev->dev,
+ "del_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_entry, %s Mac key(%#x:%#x) entry_index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+	/* do the delete operation */
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+	/* del soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_del_mac_mc_port - del mac mc-port
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_entry: mac entry
+ */
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry)
+{
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry = priv->soft_mac_tbl;
+ u16 vlan_id;
+ u8 in_port_num;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ struct dsaf_tbl_tcam_data tcam_data;
+ int mskid;
+ const u8 empty_msk[sizeof(mac_data.tbl_mcast_port_msk)] = {0};
+ struct dsaf_drv_tbl_tcam_key mask_key;
+ struct dsaf_tbl_tcam_data *pmask_key = NULL;
+ u8 mc_addr[ETH_ALEN];
+
+ if (!(void *)mac_entry) {
+ dev_err(dsaf_dev->dev,
+ "hns_dsaf_del_mac_mc_port mac_entry is NULL\n");
+ return -EINVAL;
+ }
+
+ /*check mac addr */
+ if (MAC_IS_ALL_ZEROS(mac_entry->addr)) {
+ dev_err(dsaf_dev->dev, "del_port failed, addr %pM!\n",
+ mac_entry->addr);
+ return -EINVAL;
+ }
+
+ /* always mask vlan_id field */
+ ether_addr_copy(mc_addr, mac_entry->addr);
+
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ u8 mc_mask[ETH_ALEN];
+
+ /* prepare for key data setting */
+ hns_dsaf_setup_mc_mask(dsaf_dev, mac_entry->in_port_num,
+ mc_mask, mac_entry->addr);
+ hns_dsaf_mc_mask_bit_clear(mc_addr, mc_mask);
+
+ /* config key mask */
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, 0x00, 0xff, mc_mask);
+
+ pmask_key = (struct dsaf_tbl_tcam_data *)(&mask_key);
+ }
+
+ /* get key info */
+ vlan_id = mac_entry->in_vlan_id;
+ in_port_num = mac_entry->in_port_num;
+
+ /* config key */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, vlan_id, in_port_num, mc_addr);
+
+	/* check if the tcam entry exists */
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+		/* entry not found */
+ dev_err(dsaf_dev->dev,
+ "find_soft_mac_entry failed, %s Mac key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+
+ dev_dbg(dsaf_dev->dev,
+ "del_mac_mc_port, %s key(%#x:%#x) index%d\n",
+ dsaf_dev->ae_dev.name, mac_key.high.val,
+ mac_key.low.val, entry_index);
+
+ /* read entry */
+ hns_dsaf_tcam_mc_get(dsaf_dev, entry_index, &tcam_data, &mac_data);
+
+	/* del the port */
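+	/* same port -> mask-bit mapping as in hns_dsaf_add_mac_mc_port */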
+ if (mac_entry->port_num < DSAF_SERVICE_NW_NUM) {
+ mskid = mac_entry->port_num;
+ } else if (mac_entry->port_num >= DSAF_BASE_INNER_PORT_NUM) {
+ mskid = mac_entry->port_num -
+ DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+ } else {
+ dev_err(dsaf_dev->dev,
+ "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, mac_entry->port_num,
+ mac_key.high.val, mac_key.low.val);
+ return -EINVAL;
+ }
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mskid / 32], mskid % 32, 0);
+
+	/* if no port is left in the mask, delete the whole entry */
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(mac_data.tbl_mcast_port_msk))) {
+ hns_dsaf_tcam_mc_invld(dsaf_dev, entry_index);
+
+ /* del soft entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+	} else { /* ports remain, just del this port and update the entry */
+ tcam_data.tbl_tcam_data_high = mac_key.high.val;
+ tcam_data.tbl_tcam_data_low = mac_key.low.val;
+
+ hns_dsaf_tcam_mc_cfg(dsaf_dev, entry_index,
+ &tcam_data,
+ pmask_key, &mac_data);
+ }
+
+ return 0;
+}
+
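+/* hns_dsaf_clr_mac_mc_port - stop the VF port @port_num from receiving the
+ * multicast addresses learned on mac @mac_id; once no VF is left, the mac
+ * port itself is removed from the entry as well
+ */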
+int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev, u8 mac_id,
+ u8 port_num)
+{
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ struct dsaf_tbl_tcam_mcast_cfg mac_data;
+ int ret = 0, i;
+
+ if (HNS_DSAF_IS_DEBUG(dsaf_dev))
+ return 0;
+
+ for (i = 0; i < DSAF_TCAM_SUM - DSAFV2_MAC_FUZZY_TCAM_NUM; i++) {
+ u8 addr[ETH_ALEN];
+ u8 port;
+
+ soft_mac_entry = priv->soft_mac_tbl + i;
+
+ hns_dsaf_tcam_addr_get(&soft_mac_entry->tcam_key, addr);
+ port = dsaf_get_field(
+ soft_mac_entry->tcam_key.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_PORT_M,
+ DSAF_TBL_TCAM_KEY_PORT_S);
+ /* check valid tcam mc entry */
+ if (soft_mac_entry->index != DSAF_INVALID_ENTRY_IDX &&
+ port == mac_id &&
+ is_multicast_ether_addr(addr) &&
+ !is_broadcast_ether_addr(addr)) {
+ const u32 empty_msk[DSAF_PORT_MSK_NUM] = {0};
+ struct dsaf_drv_mac_single_dest_entry mac_entry;
+
+ /* disable receiving of this multicast address for
+ * the VF.
+ */
+ ether_addr_copy(mac_entry.addr, addr);
+ mac_entry.in_vlan_id = dsaf_get_field(
+ soft_mac_entry->tcam_key.low.bits.port_vlan,
+ DSAF_TBL_TCAM_KEY_VLAN_M,
+ DSAF_TBL_TCAM_KEY_VLAN_S);
+ mac_entry.in_port_num = mac_id;
+ mac_entry.port_num = port_num;
+ if (hns_dsaf_del_mac_mc_port(dsaf_dev, &mac_entry)) {
+ ret = -EINVAL;
+ continue;
+ }
+
+ /* disable receiving of this multicast address for
+			 * the mac port if all VFs are disabled
+ */
+ hns_dsaf_tcam_mc_get(dsaf_dev, i,
+ (struct dsaf_tbl_tcam_data *)
+ (&soft_mac_entry->tcam_key),
+ &mac_data);
+ dsaf_set_bit(mac_data.tbl_mcast_port_msk[mac_id / 32],
+ mac_id % 32, 0);
+ if (!memcmp(mac_data.tbl_mcast_port_msk, empty_msk,
+ sizeof(u32) * DSAF_PORT_MSK_NUM)) {
+ mac_entry.port_num = mac_id;
+ if (hns_dsaf_del_mac_mc_port(dsaf_dev,
+ &mac_entry)) {
+ ret = -EINVAL;
+ continue;
+ }
+ }
+ }
+ }
+
+ return ret;
+}
+
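+/* allocate the dsaf device together with its private data; the private area
+ * sits right after struct dsaf_device and is reached via hns_dsaf_dev_priv()
+ */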
+static struct dsaf_device *hns_dsaf_alloc_dev(struct device *dev,
+ size_t sizeof_priv)
+{
+ struct dsaf_device *dsaf_dev;
+
+ dsaf_dev = devm_kzalloc(dev,
+ sizeof(*dsaf_dev) + sizeof_priv, GFP_KERNEL);
+ if (unlikely(!dsaf_dev)) {
+ dsaf_dev = ERR_PTR(-ENOMEM);
+ } else {
+ dsaf_dev->dev = dev;
+ dev_set_drvdata(dev, dsaf_dev);
+ }
+
+ return dsaf_dev;
+}
+
+/**
+ * hns_dsaf_free_dev - free dev mem
+ * @dsaf_dev: dsa fabric device struct pointer
+ */
+static void hns_dsaf_free_dev(struct dsaf_device *dsaf_dev)
+{
+ (void)dev_set_drvdata(dsaf_dev->dev, NULL);
+}
+
+/**
+ * hns_dsaf_pfc_unit_cnt - set pfc unit count
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac id in use
+ * @rate: port rate mode
+ */
+static void hns_dsaf_pfc_unit_cnt(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate)
+{
+ u32 unit_cnt;
+
+ switch (rate) {
+ case DSAF_PORT_RATE_10000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ break;
+ case DSAF_PORT_RATE_1000:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ case DSAF_PORT_RATE_2500:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000;
+ break;
+ default:
+ unit_cnt = HNS_DSAF_PFC_UNIT_CNT_FOR_XGE;
+ }
+
+ dsaf_set_dev_field(dsaf_dev,
+ (DSAF_PFC_UNIT_CNT_0_REG + 0x4 * (u64)mac_id),
+ DSAF_PFC_UNINT_CNT_M, DSAF_PFC_UNINT_CNT_S,
+ unit_cnt);
+}
+
+/**
+ * hns_dsaf_port_work_rate_cfg - set port work mode by rate
+ * @dsaf_dev: dsa fabric device struct pointer
+ * @mac_id: mac id
+ * @rate_mode: port rate mode
+ */
+static void
+hns_dsaf_port_work_rate_cfg(struct dsaf_device *dsaf_dev, int mac_id,
+ enum dsaf_port_rate_mode rate_mode)
+{
+ u32 port_work_mode;
+
+ port_work_mode = dsaf_read_dev(
+ dsaf_dev, DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id);
+
+ if (rate_mode == DSAF_PORT_RATE_10000)
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 1);
+ else
+ dsaf_set_bit(port_work_mode, DSAF_XGE_GE_WORK_MODE_S, 0);
+
+ dsaf_write_dev(dsaf_dev,
+ DSAF_XGE_GE_WORK_MODE_0_REG + 0x4 * (u64)mac_id,
+ port_work_mode);
+
+ hns_dsaf_pfc_unit_cnt(dsaf_dev, mac_id, rate_mode);
+}
+
+/**
+ * hns_dsaf_fix_mac_mode - dsaf modify mac mode
+ * @mac_cb: mac control block
+ */
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb)
+{
+ enum dsaf_port_rate_mode mode;
+ struct dsaf_device *dsaf_dev = mac_cb->dsaf_dev;
+ int mac_id = mac_cb->mac_id;
+
+ if (mac_cb->mac_type != HNAE_PORT_SERVICE)
+ return;
+ if (mac_cb->phy_if == PHY_INTERFACE_MODE_XGMII)
+ mode = DSAF_PORT_RATE_10000;
+ else
+ mode = DSAF_PORT_RATE_1000;
+
+ hns_dsaf_port_work_rate_cfg(dsaf_dev, mac_id, mode);
+}
+
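+/* return the address of the inode priority-pause counter for priority @index:
+ * the counters are grouped in zones of DSAF_REG_PER_ZONE registers, addressed
+ * by a per-zone base offset plus a per-register offset within the zone
+ */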
+static u32 hns_dsaf_get_inode_prio_reg(int index)
+{
+ int base_index, offset;
+ u32 base_addr = DSAF_INODE_IN_PRIO_PAUSE_BASE_REG;
+
+ base_index = (index + 1) / DSAF_REG_PER_ZONE;
+ offset = (index + 1) % DSAF_REG_PER_ZONE;
+
+ return base_addr + DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET * base_index +
+ DSAF_INODE_IN_PRIO_PAUSE_OFFSET * offset;
+}
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 node_num)
+{
+ struct dsaf_hw_stats *hw_stats
+ = &dsaf_dev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+ int i;
+ u32 reg_tmp;
+
+ hw_stats->pad_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->man_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rx_pkt_id += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + 0x80 * (u64)node_num);
+
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ hw_stats->rx_pause_frame +=
+ dsaf_read_dev(dsaf_dev, reg_tmp + 0x80 * (u64)node_num);
+
+ hw_stats->release_buf_num += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->sbm_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->crc_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->bp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->rslt_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + 0x80 * (u64)node_num);
+ hw_stats->local_addr_false += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + 0x80 * (u64)node_num);
+
+ hw_stats->vlan_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + 4 * (u64)node_num);
+ hw_stats->stp_drop += dsaf_read_dev(dsaf_dev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + 4 * (u64)node_num);
+
+	/* pfc pause frame statistics stored in dsaf inode */
+ if ((node_num < DSAF_SERVICE_NW_NUM) && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ reg_tmp = hns_dsaf_get_inode_prio_reg(i);
+ hw_stats->rx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ reg_tmp + 0x4 * (u64)node_num);
+ hw_stats->tx_pfc[i] += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG +
+ DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET * i +
+ 0xF0 * (u64)node_num);
+ }
+ }
+ hw_stats->tx_pkts += dsaf_read_dev(dsaf_dev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + 0x90 * (u64)node_num);
+}
+
+/**
+ * hns_dsaf_get_regs - dump dsaf regs
+ * @ddev: dsaf device
+ * @port: port
+ * @data: data buffer for register values
+ */
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data)
+{
+ u32 i = 0;
+ u32 j;
+ u32 *p = data;
+ u32 reg_tmp;
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
+
+ /* dsaf common registers */
+ p[0] = dsaf_read_dev(ddev, DSAF_SRAM_INIT_OVER_0_REG);
+ p[1] = dsaf_read_dev(ddev, DSAF_CFG_0_REG);
+ p[2] = dsaf_read_dev(ddev, DSAF_ECC_ERR_INVERT_0_REG);
+ p[3] = dsaf_read_dev(ddev, DSAF_ABNORMAL_TIMEOUT_0_REG);
+ p[4] = dsaf_read_dev(ddev, DSAF_FSM_TIMEOUT_0_REG);
+ p[5] = dsaf_read_dev(ddev, DSAF_DSA_REG_CNT_CLR_CE_REG);
+ p[6] = dsaf_read_dev(ddev, DSAF_DSA_SBM_INF_FIFO_THRD_REG);
+ p[7] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_SEL_REG);
+ p[8] = dsaf_read_dev(ddev, DSAF_DSA_SRAM_1BIT_ECC_CNT_REG);
+
+ p[9] = dsaf_read_dev(ddev, DSAF_PFC_EN_0_REG + port * 4);
+ p[10] = dsaf_read_dev(ddev, DSAF_PFC_UNIT_CNT_0_REG + port * 4);
+ p[11] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[12] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[13] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[14] = dsaf_read_dev(ddev, DSAF_XGE_INT_MSK_0_REG + port * 4);
+ p[15] = dsaf_read_dev(ddev, DSAF_PPE_INT_MSK_0_REG + port * 4);
+ p[16] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_MSK_0_REG + port * 4);
+ p[17] = dsaf_read_dev(ddev, DSAF_XGE_INT_SRC_0_REG + port * 4);
+ p[18] = dsaf_read_dev(ddev, DSAF_PPE_INT_SRC_0_REG + port * 4);
+ p[19] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_SRC_0_REG + port * 4);
+ p[20] = dsaf_read_dev(ddev, DSAF_XGE_INT_STS_0_REG + port * 4);
+ p[21] = dsaf_read_dev(ddev, DSAF_PPE_INT_STS_0_REG + port * 4);
+ p[22] = dsaf_read_dev(ddev, DSAF_ROCEE_INT_STS_0_REG + port * 4);
+ p[23] = dsaf_read_dev(ddev, DSAF_PPE_QID_CFG_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[24 + i] = dsaf_read_dev(ddev,
+ DSAF_SW_PORT_TYPE_0_REG + i * 4);
+
+ p[32] = dsaf_read_dev(ddev, DSAF_MIX_DEF_QID_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++)
+ p[33 + i] = dsaf_read_dev(ddev,
+ DSAF_PORT_DEF_VLAN_0_REG + i * 4);
+
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++)
+ p[41 + i] = dsaf_read_dev(ddev,
+ DSAF_VM_DEF_VLAN_0_REG + i * 4);
+
+ /* dsaf inode registers */
+ p[170] = dsaf_read_dev(ddev, DSAF_INODE_CUT_THROUGH_CFG_0_REG);
+
+ p[171] = dsaf_read_dev(ddev,
+ DSAF_INODE_ECC_ERR_ADDR_0_REG + port * 0x80);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[172 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_PORT_NUM_0_REG + j * 0x80);
+ p[175 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PRI_TC_CFG_0_REG + j * 0x80);
+ p[178 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_STATUS_0_REG + j * 0x80);
+ p[181 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_PAD_DISCARD_NUM_0_REG + j * 0x80);
+ p[184 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_MAN_NUM_0_REG + j * 0x80);
+ p[187 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_FINAL_IN_PKT_NUM_0_REG + j * 0x80);
+ p[190 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_PID_NUM_0_REG + j * 0x80);
+ reg_tmp = is_ver1 ? DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG :
+ DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG;
+ p[193 + i] = dsaf_read_dev(ddev, reg_tmp + j * 0x80);
+ p[196 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_RELS_NUM_0_REG + j * 0x80);
+ p[199 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SBM_DROP_NUM_0_REG + j * 0x80);
+ p[202 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_CRC_FALSE_NUM_0_REG + j * 0x80);
+ p[205 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BP_DISCARD_NUM_0_REG + j * 0x80);
+ p[208 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_RSLT_DISCARD_NUM_0_REG + j * 0x80);
+ p[211 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG + j * 0x80);
+ p[214 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VOQ_OVER_NUM_0_REG + j * 0x80);
+ p[217 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_SAVE_STATUS_0_REG + j * 4);
+ p[220 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_BD_ORDER_STATUS_0_REG + j * 4);
+ p[223 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_SW_VLAN_TAG_DISC_0_REG + j * 4);
+ p[226 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_IN_DATA_STP_DISC_0_REG + j * 4);
+ }
+
+ p[229] = dsaf_read_dev(ddev, DSAF_INODE_GE_FC_EN_0_REG + port * 4);
+
+ for (i = 0; i < DSAF_INODE_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[230 + i] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC0_IN_PKT_NUM_0_REG + j * 4);
+ }
+
+ p[233] = dsaf_read_dev(ddev,
+ DSAF_INODE_VC1_IN_PKT_NUM_0_REG + port * 0x80);
+
+	/* dsaf sbm registers */
+ for (i = 0; i < HNS_DSAF_SBM_NUM(ddev) / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[234 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_CFG_REG_0_REG + j * 0x80);
+ p[237 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_0_XGE_REG_0_REG + j * 0x80);
+ p[240 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_1_REG_0_REG + j * 0x80);
+ p[243 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_2_XGE_REG_0_REG + j * 0x80);
+ p[246 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_0_0_REG + j * 0x80);
+ p[249 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_FREE_CNT_1_0_REG + j * 0x80);
+ p[252 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_0_0_REG + j * 0x80);
+ p[255 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_1_0_REG + j * 0x80);
+ p[258 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_2_0_REG + j * 0x80);
+ p[261 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CNT_3_0_REG + j * 0x80);
+ p[264 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INER_ST_0_REG + j * 0x80);
+ p[267 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_MIB_REQ_FAILED_TC_0_REG + j * 0x80);
+ p[270 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_CNT_0_REG + j * 0x80);
+ p[273 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_DROP_CNT_0_REG + j * 0x80);
+ p[276 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_INF_OUTPORT_CNT_0_REG + j * 0x80);
+ p[279 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG + j * 0x80);
+ p[282 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG + j * 0x80);
+ p[285 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG + j * 0x80);
+ p[288 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG + j * 0x80);
+ p[291 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG + j * 0x80);
+ p[294 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG + j * 0x80);
+ p[297 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG + j * 0x80);
+ p[300 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG + j * 0x80);
+ p[303 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_REQ_CNT_0_REG + j * 0x80);
+ p[306 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_LNK_RELS_CNT_0_REG + j * 0x80);
+ p[309 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_3_REG_0_REG + j * 0x80);
+ p[312 + i] = dsaf_read_dev(ddev,
+ DSAF_SBM_BP_CFG_4_REG_0_REG + j * 0x80);
+ }
+
+ /* dsaf onode registers */
+ for (i = 0; i < DSAF_XOD_NUM; i++) {
+ p[315 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG + i * 0x90);
+ p[323 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG + i * 0x90);
+ p[331 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG + i * 0x90);
+ p[339 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG + i * 0x90);
+ p[347 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG + i * 0x90);
+ p[355 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_ETS_TOKEN_CFG_0_REG + i * 0x90);
+ }
+
+ p[363] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_0_0_REG + port * 0x90);
+ p[364] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_1_0_REG + port * 0x90);
+ p[365] = dsaf_read_dev(ddev, DSAF_XOD_PFS_CFG_2_0_REG + port * 0x90);
+
+ for (i = 0; i < DSAF_XOD_BIG_NUM / DSAF_COMM_CHN; i++) {
+ j = i * DSAF_COMM_CHN + port;
+ p[366 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_L_0_REG + j * 0x90);
+ p[369 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_GNT_H_0_REG + j * 0x90);
+ p[372 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_CONNECT_STATE_0_REG + j * 0x90);
+ p[375 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVPKT_CNT_0_REG + j * 0x90);
+ p[378 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC0_CNT_0_REG + j * 0x90);
+ p[381 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC1_CNT_0_REG + j * 0x90);
+ p[384 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC2_CNT_0_REG + j * 0x90);
+ p[387 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVTC3_CNT_0_REG + j * 0x90);
+ p[390 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC0_CNT_0_REG + j * 0x90);
+ p[393 + i] = dsaf_read_dev(ddev,
+ DSAF_XOD_RCVVC1_CNT_0_REG + j * 0x90);
+ }
+
+ p[396] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[397] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[398] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN2_CNT_0_REG + port * 0x90);
+ p[399] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN3_CNT_0_REG + port * 0x90);
+ p[400] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN4_CNT_0_REG + port * 0x90);
+ p[401] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN5_CNT_0_REG + port * 0x90);
+ p[402] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN6_CNT_0_REG + port * 0x90);
+ p[403] = dsaf_read_dev(ddev,
+ DSAF_XOD_XGE_RCVIN7_CNT_0_REG + port * 0x90);
+ p[404] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[405] = dsaf_read_dev(ddev,
+ DSAF_XOD_PPE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[406] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG + port * 0x90);
+ p[407] = dsaf_read_dev(ddev,
+ DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG + port * 0x90);
+ p[408] = dsaf_read_dev(ddev,
+ DSAF_XOD_FIFO_STATUS_0_REG + port * 0x90);
+
+ /* dsaf voq registers */
+ for (i = 0; i < DSAF_VOQ_NUM / DSAF_COMM_CHN; i++) {
+ j = (i * DSAF_COMM_CHN + port) * 0x90;
+ p[409 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_INVERT_EN_0_REG + j);
+ p[412 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_SRAM_PKT_NUM_0_REG + j);
+ p[415 + i] = dsaf_read_dev(ddev, DSAF_VOQ_IN_PKT_NUM_0_REG + j);
+ p[418 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_OUT_PKT_NUM_0_REG + j);
+ p[421 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ECC_ERR_ADDR_0_REG + j);
+ p[424 + i] = dsaf_read_dev(ddev, DSAF_VOQ_BP_STATUS_0_REG + j);
+ p[427 + i] = dsaf_read_dev(ddev, DSAF_VOQ_SPUP_IDLE_0_REG + j);
+ p[430 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_0_0_REG + j);
+ p[433 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_XGE_XOD_REQ_1_0_REG + j);
+ p[436 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_PPE_XOD_REQ_0_REG + j);
+ p[439 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_ROCEE_XOD_REQ_0_REG + j);
+ p[442 + i] = dsaf_read_dev(ddev,
+ DSAF_VOQ_BP_ALL_THRD_0_REG + j);
+ }
+
+ /* dsaf tbl registers */
+ p[445] = dsaf_read_dev(ddev, DSAF_TBL_CTRL_0_REG);
+ p[446] = dsaf_read_dev(ddev, DSAF_TBL_INT_MSK_0_REG);
+ p[447] = dsaf_read_dev(ddev, DSAF_TBL_INT_SRC_0_REG);
+ p[448] = dsaf_read_dev(ddev, DSAF_TBL_INT_STS_0_REG);
+ p[449] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_ADDR_0_REG);
+ p[450] = dsaf_read_dev(ddev, DSAF_TBL_LINE_ADDR_0_REG);
+ p[451] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_HIGH_0_REG);
+ p[452] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_LOW_0_REG);
+ p[453] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_4_0_REG);
+ p[454] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_3_0_REG);
+ p[455] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_2_0_REG);
+ p[456] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_1_0_REG);
+ p[457] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_MCAST_CFG_0_0_REG);
+ p[458] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_UCAST_CFG_0_REG);
+ p[459] = dsaf_read_dev(ddev, DSAF_TBL_LIN_CFG_0_REG);
+ p[460] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_HIGH_0_REG);
+ p[461] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RDATA_LOW_0_REG);
+ p[462] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA4_0_REG);
+ p[463] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA3_0_REG);
+ p[464] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA2_0_REG);
+ p[465] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA1_0_REG);
+ p[466] = dsaf_read_dev(ddev, DSAF_TBL_TCAM_RAM_RDATA0_0_REG);
+ p[467] = dsaf_read_dev(ddev, DSAF_TBL_LIN_RDATA_0_REG);
+
+ for (i = 0; i < DSAF_SW_PORT_NUM; i++) {
+ j = i * 0x8;
+ p[468 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO1_0_REG + j);
+ p[469 + 2 * i] = dsaf_read_dev(ddev,
+ DSAF_TBL_DA0_MIS_INFO0_0_REG + j);
+ }
+
+ p[484] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO2_0_REG);
+ p[485] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO1_0_REG);
+ p[486] = dsaf_read_dev(ddev, DSAF_TBL_SA_MIS_INFO0_0_REG);
+ p[487] = dsaf_read_dev(ddev, DSAF_TBL_PUL_0_REG);
+ p[488] = dsaf_read_dev(ddev, DSAF_TBL_OLD_RSLT_0_REG);
+ p[489] = dsaf_read_dev(ddev, DSAF_TBL_OLD_SCAN_VAL_0_REG);
+ p[490] = dsaf_read_dev(ddev, DSAF_TBL_DFX_CTRL_0_REG);
+ p[491] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_0_REG);
+ p[492] = dsaf_read_dev(ddev, DSAF_TBL_DFX_STAT_2_0_REG);
+ p[493] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_I_0_REG);
+ p[494] = dsaf_read_dev(ddev, DSAF_TBL_LKUP_NUM_O_0_REG);
+ p[495] = dsaf_read_dev(ddev, DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG);
+
+ /* dsaf other registers */
+ p[496] = dsaf_read_dev(ddev, DSAF_INODE_FIFO_WL_0_REG + port * 0x4);
+ p[497] = dsaf_read_dev(ddev, DSAF_ONODE_FIFO_WL_0_REG + port * 0x4);
+ p[498] = dsaf_read_dev(ddev, DSAF_XGE_GE_WORK_MODE_0_REG + port * 0x4);
+ p[499] = dsaf_read_dev(ddev,
+ DSAF_XGE_APP_RX_LINK_UP_0_REG + port * 0x4);
+ p[500] = dsaf_read_dev(ddev, DSAF_NETPORT_CTRL_SIG_0_REG + port * 0x4);
+ p[501] = dsaf_read_dev(ddev, DSAF_XGE_CTRL_SIG_CFG_0_REG + port * 0x4);
+
+ if (!is_ver1)
+ p[502] = dsaf_read_dev(ddev, DSAF_PAUSE_CFG_REG + port * 0x4);
+
+ /* mark end of dsaf regs */
+ for (i = 503; i < 504; i++)
+ p[i] = 0xdddddddd;
+}
+
+static char *hns_dsaf_get_node_stats_strings(char *data, int node,
+ struct dsaf_device *dsaf_dev)
+{
+ char *buff = data;
+ int i;
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_pad_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_manage_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pkt_id", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_rx_pause_frame", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_release_buf_num", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_sbm_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_crc_false_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_bp_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_lookup_rslt_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_local_rslt_fail_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_vlan_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "innod%d_stp_drop_pkts", node);
+ buff += ETH_GSTRING_LEN;
+ if (node < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ snprintf(buff + 0 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "inod%d_pfc_prio%d_pkts",
+ node, i);
+ snprintf(buff + 1 * ETH_GSTRING_LEN * DSAF_PRIO_NR,
+ ETH_GSTRING_LEN, "onod%d_pfc_prio%d_pkts",
+ node, i);
+ buff += ETH_GSTRING_LEN;
+ }
+ buff += 1 * DSAF_PRIO_NR * ETH_GSTRING_LEN;
+ }
+ snprintf(buff, ETH_GSTRING_LEN, "onnod%d_tx_pkts", node);
+ buff += ETH_GSTRING_LEN;
+
+ return buff;
+}
+
+static u64 *hns_dsaf_get_node_stats(struct dsaf_device *ddev, u64 *data,
+ int node_num)
+{
+ u64 *p = data;
+ int i;
+ struct dsaf_hw_stats *hw_stats = &ddev->hw_stats[node_num];
+ bool is_ver1 = AE_IS_VER1(ddev->dsaf_ver);
+
+ p[0] = hw_stats->pad_drop;
+ p[1] = hw_stats->man_pkts;
+ p[2] = hw_stats->rx_pkts;
+ p[3] = hw_stats->rx_pkt_id;
+ p[4] = hw_stats->rx_pause_frame;
+ p[5] = hw_stats->release_buf_num;
+ p[6] = hw_stats->sbm_drop;
+ p[7] = hw_stats->crc_false;
+ p[8] = hw_stats->bp_drop;
+ p[9] = hw_stats->rslt_drop;
+ p[10] = hw_stats->local_addr_false;
+ p[11] = hw_stats->vlan_drop;
+ p[12] = hw_stats->stp_drop;
+ if (node_num < DSAF_SERVICE_NW_NUM && !is_ver1) {
+ for (i = 0; i < DSAF_PRIO_NR; i++) {
+ p[13 + i + 0 * DSAF_PRIO_NR] = hw_stats->rx_pfc[i];
+ p[13 + i + 1 * DSAF_PRIO_NR] = hw_stats->tx_pfc[i];
+ }
+ p[29] = hw_stats->tx_pkts;
+ return &p[30];
+ }
+
+ p[13] = hw_stats->tx_pkts;
+ return &p[14];
+}
+
+/**
+ * hns_dsaf_get_stats - get dsaf statistics
+ * @ddev: dsaf device
+ * @data: buffer for statistic values
+ * @port: port num
+ */
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port)
+{
+ u64 *p = data;
+ int node_num = port;
+
+ /* for ge/xge node info */
+ p = hns_dsaf_get_node_stats(ddev, p, node_num);
+
+ /* for ppe node info */
+ node_num = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats(ddev, p, node_num);
+}
+
+/**
+ * hns_dsaf_get_sset_count - get dsaf string set count
+ * @dsaf_dev: dsaf device
+ * @stringset: type of values in data
+ * return dsaf string name count
+ */
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset)
+{
+ bool is_ver1 = AE_IS_VER1(dsaf_dev->dsaf_ver);
+
+ if (stringset == ETH_SS_STATS) {
+ if (is_ver1)
+ return DSAF_STATIC_NUM;
+ else
+ return DSAF_V2_STATIC_NUM;
+ }
+ return 0;
+}
+
+/**
+ * hns_dsaf_get_strings - get dsaf string set
+ * @stringset: string set index
+ * @data: buffer for string names
+ * @port: port index
+ * @dsaf_dev: dsaf device
+ */
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev)
+{
+ char *buff = (char *)data;
+ int node = port;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ /* for ge/xge node info */
+ buff = hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+
+ /* for ppe node info */
+ node = port + DSAF_PPE_INODE_BASE;
+ (void)hns_dsaf_get_node_stats_strings(buff, node, dsaf_dev);
+}
+
+/**
+ * hns_dsaf_get_regs_count - get dsaf regs count
+ * return dsaf regs count
+ */
+int hns_dsaf_get_regs_count(void)
+{
+ return DSAF_DUMP_REGS_NUM;
+}
+
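+/* map a port number to its bit index in the multicast port mask: service
+ * network ports map directly, inner ports are packed after them, anything
+ * in between is invalid
+ */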
+static int hns_dsaf_get_port_id(u8 port)
+{
+ if (port < DSAF_SERVICE_NW_NUM)
+ return port;
+
+ if (port >= DSAF_BASE_INNER_PORT_NUM)
+ return port - DSAF_BASE_INNER_PORT_NUM + DSAF_SERVICE_NW_NUM;
+
+ return -EINVAL;
+}
+
+static void set_promisc_tcam_enable(struct dsaf_device *dsaf_dev, u32 port)
+{
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 1, 0, 0, 0x80};
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask_uc = {0x01000000, 0xf};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, port};
+ struct dsaf_drv_mac_single_dest_entry mask_entry;
+ struct dsaf_drv_tbl_tcam_key temp_key, mask_key;
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ struct hns_mac_cb *mac_cb;
+ u8 addr[ETH_ALEN] = {0};
+ u8 port_num;
+ int mskid;
+
+	/* promisc uses vague table match with vlan id = 0 and mac addr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+ if (entry_index != DSAF_INVALID_ENTRY_IDX)
+ return;
+
+	/* put the promisc tcam entries at the end of the table. */
+ /* 1. set promisc unicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable uc promisc failed (port:%#x)\n",
+ port);
+ return;
+ }
+
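+	/* resolve the inner (switch) port number for this mac and use it as
+	 * the unicast output port of the vague entry
+	 */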
+ mac_cb = dsaf_dev->mac_cb[port];
+ (void)hns_mac_get_inner_port_num(mac_cb, 0, &port_num);
+ tbl_tcam_ucast.tbl_ucast_out_port = port_num;
+
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask_uc, &tbl_tcam_ucast);
+
+ /* update software entry */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = mac_key.high.val;
+ soft_mac_entry->tcam_key.low.val = mac_key.low.val;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. set promisc multicast vague tcam entry. */
+ entry_index = hns_dsaf_find_empty_mac_entry_reverse(dsaf_dev);
+ if (entry_index == DSAF_INVALID_ENTRY_IDX) {
+ dev_err(dsaf_dev->dev,
+ "enable mc promisc failed (port:%#x)\n",
+ port);
+ return;
+ }
+
+ memset(&mask_entry, 0x0, sizeof(mask_entry));
+ memset(&mask_key, 0x0, sizeof(mask_key));
+ memset(&temp_key, 0x0, sizeof(temp_key));
+ mask_entry.addr[0] = 0x01;
+ hns_dsaf_set_mac_key(dsaf_dev, &mask_key, mask_entry.in_vlan_id,
+ 0xf, mask_entry.addr);
+ tbl_tcam_mcast.tbl_mcast_item_vld = 1;
+ tbl_tcam_mcast.tbl_mcast_old_en = 0;
+
+ /* set MAC port to handle multicast */
+ mskid = hns_dsaf_get_port_id(port);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev, "%s,pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+
+ /* set pool bit map to handle multicast */
+ mskid = hns_dsaf_get_port_id(port_num);
+ if (mskid == -EINVAL) {
+ dev_err(dsaf_dev->dev,
+ "%s, pool bit map pnum(%d)error,key(%#x:%#x)\n",
+ dsaf_dev->ae_dev.name, port_num,
+ mask_key.high.val, mask_key.low.val);
+ return;
+ }
+ dsaf_set_bit(tbl_tcam_mcast.tbl_mcast_port_msk[mskid / 32],
+ mskid % 32, 1);
+
+ memcpy(&temp_key, &mask_key, sizeof(mask_key));
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ (struct dsaf_tbl_tcam_data *)(&mask_key),
+ &tbl_tcam_mcast);
+
+ /* update software entry */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = entry_index;
+ soft_mac_entry->tcam_key.high.val = temp_key.high.val;
+ soft_mac_entry->tcam_key.low.val = temp_key.low.val;
+}
+
+static void set_promisc_tcam_disable(struct dsaf_device *dsaf_dev, u32 port)
+{
+ struct dsaf_tbl_tcam_data tbl_tcam_data_mc = {0x01000000, port};
+ struct dsaf_tbl_tcam_ucast_cfg tbl_tcam_ucast = {0, 0, 0, 0, 0};
+ struct dsaf_tbl_tcam_mcast_cfg tbl_tcam_mcast = {0, 0, {0} };
+ struct dsaf_drv_priv *priv = hns_dsaf_dev_priv(dsaf_dev);
+ struct dsaf_tbl_tcam_data tbl_tcam_data_uc = {0, 0};
+ struct dsaf_tbl_tcam_data tbl_tcam_mask = {0, 0};
+ struct dsaf_drv_soft_mac_tbl *soft_mac_entry;
+ u16 entry_index = DSAF_INVALID_ENTRY_IDX;
+ struct dsaf_drv_tbl_tcam_key mac_key;
+ u8 addr[ETH_ALEN] = {0};
+
+ /* 1. delete uc vague tcam entry. */
+	/* promisc uses vague table match with vlan id = 0 and mac addr = 0 */
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config uc vague table */
+ hns_dsaf_tcam_uc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_uc,
+ &tbl_tcam_mask, &tbl_tcam_ucast);
+ /* update soft management table. */
+ soft_mac_entry = priv->soft_mac_tbl;
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+ /* step back to the START for mc. */
+ soft_mac_entry = priv->soft_mac_tbl;
+
+ /* 2. delete mc vague tcam entry. */
+ addr[0] = 0x01;
+ memset(&mac_key, 0x0, sizeof(mac_key));
+ hns_dsaf_set_mac_key(dsaf_dev, &mac_key, 0x00, port, addr);
+ entry_index = hns_dsaf_find_soft_mac_entry(dsaf_dev, &mac_key);
+
+ if (entry_index == DSAF_INVALID_ENTRY_IDX)
+ return;
+
+ /* config mc vague table */
+ hns_dsaf_tcam_mc_cfg_vague(dsaf_dev, entry_index, &tbl_tcam_data_mc,
+ &tbl_tcam_mask, &tbl_tcam_mcast);
+ /* update soft management table. */
+ soft_mac_entry += entry_index;
+ soft_mac_entry->index = DSAF_INVALID_ENTRY_IDX;
+}
+
+/* Reserve the last TCAM entry for promisc support */
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable)
+{
+ if (enable)
+ set_promisc_tcam_enable(dsaf_dev, port);
+ else
+ set_promisc_tcam_disable(dsaf_dev, port);
+}
+
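+/* wait until the VOQ input and output packet counters of the port match,
+ * i.e. no packet is left in flight; give up after HNS_MAX_WAIT_CNT polls
+ */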
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port)
+{
+ u32 val, val_tmp;
+ int wait_cnt;
+
+ if (port >= DSAF_SERVICE_NW_NUM)
+ return 0;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(dsaf_dev, DSAF_VOQ_IN_PKT_NUM_0_REG +
+ (port + DSAF_XGE_NUM) * 0x40);
+ val_tmp = dsaf_read_dev(dsaf_dev, DSAF_VOQ_OUT_PKT_NUM_0_REG +
+ (port + DSAF_XGE_NUM) * 0x40);
+ if (val == val_tmp)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(dsaf_dev->dev, "hns dsaf clean wait timeout(%u - %u).\n",
+ val, val_tmp);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_dsaf_probe - probe dsaf dev
+ * @pdev: dsaf platform device
+ * return 0 - success, negative - fail
+ */
+static int hns_dsaf_probe(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev;
+ int ret;
+
+ dsaf_dev = hns_dsaf_alloc_dev(&pdev->dev, sizeof(struct dsaf_drv_priv));
+ if (IS_ERR(dsaf_dev)) {
+ ret = PTR_ERR(dsaf_dev);
+ dev_err(&pdev->dev,
+ "dsaf_probe dsaf_alloc_dev failed, ret = %#x!\n", ret);
+ return ret;
+ }
+
+ ret = hns_dsaf_get_cfg(dsaf_dev);
+ if (ret)
+ goto free_dev;
+
+ ret = hns_dsaf_init(dsaf_dev);
+ if (ret)
+ goto free_dev;
+
+ ret = hns_mac_init(dsaf_dev);
+ if (ret)
+ goto uninit_dsaf;
+
+ ret = hns_ppe_init(dsaf_dev);
+ if (ret)
+ goto uninit_mac;
+
+ ret = hns_dsaf_ae_init(dsaf_dev);
+ if (ret)
+ goto uninit_ppe;
+
+ return 0;
+
+uninit_ppe:
+ hns_ppe_uninit(dsaf_dev);
+
+uninit_mac:
+ hns_mac_uninit(dsaf_dev);
+
+uninit_dsaf:
+ hns_dsaf_free(dsaf_dev);
+
+free_dev:
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return ret;
+}
+
+/**
+ * hns_dsaf_remove - remove dsaf dev
+ * @pdev: dsaf platform device
+ */
+static int hns_dsaf_remove(struct platform_device *pdev)
+{
+ struct dsaf_device *dsaf_dev = dev_get_drvdata(&pdev->dev);
+
+ hns_dsaf_ae_uninit(dsaf_dev);
+
+ hns_ppe_uninit(dsaf_dev);
+
+ hns_mac_uninit(dsaf_dev);
+
+ hns_dsaf_free(dsaf_dev);
+
+ hns_dsaf_free_dev(dsaf_dev);
+
+ return 0;
+}
+
+static const struct of_device_id g_dsaf_match[] = {
+ {.compatible = "hisilicon,hns-dsaf-v1"},
+ {.compatible = "hisilicon,hns-dsaf-v2"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, g_dsaf_match);
+
+static struct platform_driver g_dsaf_driver = {
+ .probe = hns_dsaf_probe,
+ .remove = hns_dsaf_remove,
+ .driver = {
+ .name = DSAF_DRV_NAME,
+ .of_match_table = g_dsaf_match,
+ .acpi_match_table = hns_dsaf_acpi_match,
+ },
+};
+
+module_platform_driver(g_dsaf_driver);
+
+/**
+ * hns_dsaf_roce_reset - reset dsaf and roce
+ * @dsaf_fwnode: Pointer to the framework node for the dsaf
+ * @dereset: false - request reset, true - drop reset
+ * return 0 - success, negative - fail
+ */
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset)
+{
+ struct dsaf_device *dsaf_dev;
+ struct platform_device *pdev;
+ u32 mp;
+ u32 sl;
+ u32 credit;
+ int i;
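+	/* per credit channel port/SL mapping; the three columns correspond to
+	 * the 6-port, 4-port and 2-port RoCE channel modes
+	 */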
+ static const u32 port_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1, DSAF_ROCE_PORT_0},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_4, DSAF_ROCE_PORT_2, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ {DSAF_ROCE_PORT_5, DSAF_ROCE_PORT_3, DSAF_ROCE_PORT_1},
+ };
+ static const u32 sl_map[DSAF_ROCE_CREDIT_CHN][DSAF_ROCE_CHAN_MODE_NUM] = {
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_0},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_1},
+ {DSAF_ROCE_SL_0, DSAF_ROCE_SL_0, DSAF_ROCE_SL_2},
+ {DSAF_ROCE_SL_1, DSAF_ROCE_SL_1, DSAF_ROCE_SL_3},
+ };
+
+ /* find the platform device corresponding to fwnode */
+ if (is_of_node(dsaf_fwnode)) {
+ pdev = of_find_device_by_node(to_of_node(dsaf_fwnode));
+ } else if (is_acpi_device_node(dsaf_fwnode)) {
+ pdev = hns_dsaf_find_platform_device(dsaf_fwnode);
+ } else {
+		pr_err("fwnode is neither OF nor ACPI type\n");
+ return -EINVAL;
+ }
+
+	/* check whether we succeeded in fetching the pdev */
+ if (!pdev) {
+ pr_err("couldn't find platform device for node\n");
+ return -ENODEV;
+ }
+
+ /* retrieve the dsaf_device from the driver data */
+ dsaf_dev = dev_get_drvdata(&pdev->dev);
+ if (!dsaf_dev) {
+ dev_err(&pdev->dev, "dsaf_dev is NULL\n");
+ put_device(&pdev->dev);
+ return -ENODEV;
+ }
+
+	/* now, make sure we are running on a compatible SoC */
+ if (AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ dev_err(dsaf_dev->dev, "%s v1 chip doesn't support RoCE!\n",
+ dsaf_dev->ae_dev.name);
+ put_device(&pdev->dev);
+ return -ENODEV;
+ }
+
+ /* do reset or de-reset according to the flag */
+ if (!dereset) {
+ /* reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ false);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, false);
+ } else {
+		/* configure dsaf tx roce according to the port map and sl map */
+ mp = dsaf_read_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(mp, 7 << i * 3, i * 3,
+ port_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_set_field(mp, 3 << i * 3, i * 3, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_PORT_MAP_REG, mp);
+
+ sl = dsaf_read_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG);
+ for (i = 0; i < DSAF_ROCE_CREDIT_CHN; i++)
+ dsaf_set_field(sl, 3 << i * 2, i * 2,
+ sl_map[i][DSAF_ROCE_6PORT_MODE]);
+ dsaf_write_dev(dsaf_dev, DSAF_ROCE_SL_MAP_REG, sl);
+
+ /* de-reset rocee-channels in dsaf and rocee */
+ dsaf_dev->misc_op->hns_dsaf_srst_chns(dsaf_dev, DSAF_CHNS_MASK,
+ true);
+ msleep(SRST_TIME_INTERVAL);
+ dsaf_dev->misc_op->hns_dsaf_roce_srst(dsaf_dev, true);
+
+ /* enable dsaf channel rocee credit */
+ credit = dsaf_read_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG);
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+
+ dsaf_set_bit(credit, DSAF_SBM_ROCEE_CFG_CRD_EN_B, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_SBM_ROCEE_CFG_REG_REG, credit);
+ }
+
+ put_device(&pdev->dev);
+
+ return 0;
+}
+EXPORT_SYMBOL(hns_dsaf_roce_reset);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HNS DSAF driver");
+MODULE_VERSION(DSAF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
new file mode 100644
index 000000000..cba04bfa0
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_main.h
@@ -0,0 +1,468 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef __HNS_DSAF_MAIN_H
+#define __HNS_DSAF_MAIN_H
+#include "hnae.h"
+
+#include "hns_dsaf_reg.h"
+#include "hns_dsaf_mac.h"
+
+struct hns_mac_cb;
+
+#define DSAF_DRV_NAME "hns_dsaf"
+#define DSAF_MOD_VERSION "v1.0"
+#define DSAF_DEVICE_NAME "dsaf"
+
+#define HNS_DSAF_DEBUG_NW_REG_OFFSET 0x100000
+
+#define DSAF_BASE_INNER_PORT_NUM 127 /* mac tbl qid */
+
+#define DSAF_MAX_CHIP_NUM 2 /*max 2 chips */
+
+#define DSAF_DEFAUTL_QUEUE_NUM_PER_PPE 22
+
+#define HNS_DSAF_MAX_DESC_CNT 1024
+#define HNS_DSAF_MIN_DESC_CNT 16
+
+#define DSAF_INVALID_ENTRY_IDX 0xffff
+
+#define DSAF_CFG_READ_CNT 30
+
+#define DSAF_DUMP_REGS_NUM 504
+#define DSAF_STATIC_NUM 28
+#define DSAF_V2_STATIC_NUM 44
+#define DSAF_PRIO_NR 8
+#define DSAF_REG_PER_ZONE 3
+
+#define DSAF_ROCE_CREDIT_CHN 8
+#define DSAF_ROCE_CHAN_MODE 3
+
+#define HNS_MAX_WAIT_CNT 10000
+
+enum dsaf_roce_port_mode {
+ DSAF_ROCE_6PORT_MODE,
+ DSAF_ROCE_4PORT_MODE,
+ DSAF_ROCE_2PORT_MODE,
+ DSAF_ROCE_CHAN_MODE_NUM,
+};
+
+enum dsaf_roce_port_num {
+ DSAF_ROCE_PORT_0,
+ DSAF_ROCE_PORT_1,
+ DSAF_ROCE_PORT_2,
+ DSAF_ROCE_PORT_3,
+ DSAF_ROCE_PORT_4,
+ DSAF_ROCE_PORT_5,
+};
+
+enum dsaf_roce_qos_sl {
+ DSAF_ROCE_SL_0,
+ DSAF_ROCE_SL_1,
+ DSAF_ROCE_SL_2,
+ DSAF_ROCE_SL_3,
+};
+
+#define DSAF_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HNS_DSAF_IS_DEBUG(dev) ((dev)->dsaf_mode == DSAF_MODE_DISABLE_SP)
+
+enum hal_dsaf_mode {
+ HRD_DSAF_NO_DSAF_MODE = 0x0,
+ HRD_DSAF_MODE = 0x1,
+};
+
+enum hal_dsaf_tc_mode {
+ HRD_DSAF_4TC_MODE = 0X0,
+ HRD_DSAF_8TC_MODE = 0X1,
+};
+
+struct dsaf_vm_def_vlan {
+ u32 vm_def_vlan_id;
+ u32 vm_def_vlan_cfi;
+ u32 vm_def_vlan_pri;
+};
+
+struct dsaf_tbl_tcam_data {
+ u32 tbl_tcam_data_high;
+ u32 tbl_tcam_data_low;
+};
+
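+/* number of 32-bit words needed to hold one mask bit per destination
+ * (all queues plus the service network ports)
+ */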
+#define DSAF_PORT_MSK_NUM \
+ ((DSAF_TOTAL_QUEUE_NUM + DSAF_SERVICE_NW_NUM - 1) / 32 + 1)
+struct dsaf_tbl_tcam_mcast_cfg {
+ u8 tbl_mcast_old_en;
+ u8 tbl_mcast_item_vld;
+ u32 tbl_mcast_port_msk[DSAF_PORT_MSK_NUM];
+};
+
+struct dsaf_tbl_tcam_ucast_cfg {
+ u32 tbl_ucast_old_en;
+ u32 tbl_ucast_item_vld;
+ u32 tbl_ucast_mac_discard;
+ u32 tbl_ucast_dvc;
+ u32 tbl_ucast_out_port;
+};
+
+struct dsaf_tbl_line_cfg {
+ u32 tbl_line_mac_discard;
+ u32 tbl_line_dvc;
+ u32 tbl_line_out_port;
+};
+
+enum dsaf_port_rate_mode {
+ DSAF_PORT_RATE_1000 = 0,
+ DSAF_PORT_RATE_2500,
+ DSAF_PORT_RATE_10000
+};
+
+enum dsaf_stp_port_type {
+ DSAF_STP_PORT_TYPE_DISCARD = 0,
+ DSAF_STP_PORT_TYPE_BLOCK = 1,
+ DSAF_STP_PORT_TYPE_LISTEN = 2,
+ DSAF_STP_PORT_TYPE_LEARN = 3,
+ DSAF_STP_PORT_TYPE_FORWARD = 4
+};
+
+enum dsaf_sw_port_type {
+ DSAF_SW_PORT_TYPE_NON_VLAN = 0,
+ DSAF_SW_PORT_TYPE_ACCESS = 1,
+ DSAF_SW_PORT_TYPE_TRUNK = 2,
+};
+
+#define DSAF_SUB_BASE_SIZE (0x10000)
+
+/* dsaf mode define */
+enum dsaf_mode {
+ DSAF_MODE_INVALID = 0, /**< Invalid dsaf mode */
+ DSAF_MODE_ENABLE_FIX, /**< en DSAF-mode, fixed to queue*/
+ DSAF_MODE_ENABLE_0VM, /**< en DSAF-mode, support 0 VM */
+ DSAF_MODE_ENABLE_8VM, /**< en DSAF-mode, support 8 VM */
+ DSAF_MODE_ENABLE_16VM, /**< en DSAF-mode, support 16 VM */
+ DSAF_MODE_ENABLE_32VM, /**< en DSAF-mode, support 32 VM */
+ DSAF_MODE_ENABLE_128VM, /**< en DSAF-mode, support 128 VM */
+	DSAF_MODE_ENABLE,	/**< all modes above enable DSAF mode */
+	DSAF_MODE_DISABLE_SP,	/**< non-dsaf, single port mode */
+	DSAF_MODE_DISABLE_FIX,	/**< non-dsaf, fixed to queue */
+	DSAF_MODE_DISABLE_2PORT_8VM,	/**< non-dsaf, 2port 8VM */
+	DSAF_MODE_DISABLE_2PORT_16VM,	/**< non-dsaf, 2port 16VM */
+	DSAF_MODE_DISABLE_2PORT_64VM,	/**< non-dsaf, 2port 64VM */
+	DSAF_MODE_DISABLE_6PORT_0VM,	/**< non-dsaf, 6port 0VM */
+	DSAF_MODE_DISABLE_6PORT_2VM,	/**< non-dsaf, 6port 2VM */
+	DSAF_MODE_DISABLE_6PORT_4VM,	/**< non-dsaf, 6port 4VM */
+	DSAF_MODE_DISABLE_6PORT_16VM,	/**< non-dsaf, 6port 16VM */
+ DSAF_MODE_MAX /**< the last one, use as the num */
+};
+
+#define DSAF_DEST_PORT_NUM 256 /* DSAF max port num */
+#define DSAF_WORD_BIT_CNT 32 /* the num bit of word */
+
+/*mac entry, mc or uc entry*/
+struct dsaf_drv_mac_single_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[ETH_ALEN];
+ u16 in_vlan_id; /* value of VlanId */
+
+	/* the valid input port num; fixed to 0 in dsaf mode, */
+	/* in non-dsaf mode it is the port for which the entry is valid */
+ u8 in_port_num;
+
+ u8 port_num; /*output port num*/
+ u8 rsv[6];
+};
+
+/*only mc entry*/
+struct dsaf_drv_mac_multi_dest_entry {
+ /* mac addr, match the entry*/
+ u8 addr[ETH_ALEN];
+ u16 in_vlan_id;
+	/* output port mask for this mac addr, */
+	/* bit0-bit5 correspond to Port0-Port5 (1 bit per port) */
+ u32 port_mask[DSAF_DEST_PORT_NUM / DSAF_WORD_BIT_CNT];
+
+	/* the valid input port num; fixed to 0 in dsaf mode, */
+	/* in non-dsaf mode it is the port for which the entry is valid */
+ u8 in_port_num;
+ u8 rsv[7];
+};
+
+struct dsaf_hw_stats {
+ u64 pad_drop;
+ u64 man_pkts;
+ u64 rx_pkts;
+ u64 rx_pkt_id;
+ u64 rx_pause_frame;
+ u64 release_buf_num;
+ u64 sbm_drop;
+ u64 crc_false;
+ u64 bp_drop;
+ u64 rslt_drop;
+ u64 local_addr_false;
+ u64 vlan_drop;
+ u64 stp_drop;
+ u64 rx_pfc[DSAF_PRIO_NR];
+ u64 tx_pfc[DSAF_PRIO_NR];
+ u64 tx_pkts;
+};
+
+struct hnae_vf_cb {
+ u8 port_index;
+ struct hns_mac_cb *mac_cb;
+ struct dsaf_device *dsaf_dev;
+	struct hnae_handle ae_handle; /* must be the last member */
+};
+
+struct dsaf_int_xge_src {
+ u32 xid_xge_ecc_err_int_src;
+ u32 xid_xge_fsm_timout_int_src;
+ u32 sbm_xge_lnk_fsm_timout_int_src;
+ u32 sbm_xge_lnk_ecc_2bit_int_src;
+ u32 sbm_xge_mib_req_failed_int_src;
+ u32 sbm_xge_mib_req_fsm_timout_int_src;
+ u32 sbm_xge_mib_rels_fsm_timout_int_src;
+ u32 sbm_xge_sram_ecc_2bit_int_src;
+ u32 sbm_xge_mib_buf_sum_err_int_src;
+ u32 sbm_xge_mib_req_extra_int_src;
+ u32 sbm_xge_mib_rels_extra_int_src;
+ u32 voq_xge_start_to_over_0_int_src;
+ u32 voq_xge_start_to_over_1_int_src;
+ u32 voq_xge_ecc_err_int_src;
+};
+
+struct dsaf_int_ppe_src {
+ u32 xid_ppe_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_fsm_timout_int_src;
+ u32 sbm_ppe_lnk_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_req_failed_int_src;
+ u32 sbm_ppe_mib_req_fsm_timout_int_src;
+ u32 sbm_ppe_mib_rels_fsm_timout_int_src;
+ u32 sbm_ppe_sram_ecc_2bit_int_src;
+ u32 sbm_ppe_mib_buf_sum_err_int_src;
+ u32 sbm_ppe_mib_req_extra_int_src;
+ u32 sbm_ppe_mib_rels_extra_int_src;
+ u32 voq_ppe_start_to_over_0_int_src;
+ u32 voq_ppe_ecc_err_int_src;
+ u32 xod_ppe_fifo_rd_empty_int_src;
+ u32 xod_ppe_fifo_wr_full_int_src;
+};
+
+struct dsaf_int_rocee_src {
+ u32 xid_rocee_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_fsm_timout_int_src;
+ u32 sbm_rocee_lnk_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_req_failed_int_src;
+ u32 sbm_rocee_mib_req_fsm_timout_int_src;
+ u32 sbm_rocee_mib_rels_fsm_timout_int_src;
+ u32 sbm_rocee_sram_ecc_2bit_int_src;
+ u32 sbm_rocee_mib_buf_sum_err_int_src;
+ u32 sbm_rocee_mib_req_extra_int_src;
+ u32 sbm_rocee_mib_rels_extra_int_src;
+ u32 voq_rocee_start_to_over_0_int_src;
+ u32 voq_rocee_ecc_err_int_src;
+};
+
+struct dsaf_int_tbl_src {
+ u32 tbl_da0_mis_src;
+ u32 tbl_da1_mis_src;
+ u32 tbl_da2_mis_src;
+ u32 tbl_da3_mis_src;
+ u32 tbl_da4_mis_src;
+ u32 tbl_da5_mis_src;
+ u32 tbl_da6_mis_src;
+ u32 tbl_da7_mis_src;
+ u32 tbl_sa_mis_src;
+ u32 tbl_old_sech_end_src;
+ u32 lram_ecc_err1_src;
+ u32 lram_ecc_err2_src;
+ u32 tram_ecc_err1_src;
+ u32 tram_ecc_err2_src;
+ u32 tbl_ucast_bcast_xge0_src;
+ u32 tbl_ucast_bcast_xge1_src;
+ u32 tbl_ucast_bcast_xge2_src;
+ u32 tbl_ucast_bcast_xge3_src;
+ u32 tbl_ucast_bcast_xge4_src;
+ u32 tbl_ucast_bcast_xge5_src;
+ u32 tbl_ucast_bcast_ppe_src;
+ u32 tbl_ucast_bcast_rocee_src;
+};
+
+struct dsaf_int_stat {
+ struct dsaf_int_xge_src dsaf_int_xge_stat[DSAF_COMM_CHN];
+ struct dsaf_int_ppe_src dsaf_int_ppe_stat[DSAF_COMM_CHN];
+ struct dsaf_int_rocee_src dsaf_int_rocee_stat[DSAF_COMM_CHN];
+ struct dsaf_int_tbl_src dsaf_int_tbl_stat[1];
+
+};
+
+struct dsaf_misc_op {
+ void (*cpld_set_led)(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data);
+ void (*cpld_reset_led)(struct hns_mac_cb *mac_cb);
+ int (*cpld_set_led_id)(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status);
+	/* reset callbacks; a block is put in reset when dereset is false */
+ void (*dsaf_reset)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*xge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ge_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_srst)(struct dsaf_device *dsaf_dev, u32 port, bool dereset);
+ void (*ppe_comm_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+ void (*hns_dsaf_srst_chns)(struct dsaf_device *dsaf_dev, u32 msk,
+ bool dereset);
+ void (*hns_dsaf_roce_srst)(struct dsaf_device *dsaf_dev, bool dereset);
+
+ phy_interface_t (*get_phy_if)(struct hns_mac_cb *mac_cb);
+ int (*get_sfp_prsnt)(struct hns_mac_cb *mac_cb, int *sfp_prsnt);
+
+ int (*cfg_serdes_loopback)(struct hns_mac_cb *mac_cb, bool en);
+};
+
+/* Dsaf device struct define, and mac -> dsaf */
+struct dsaf_device {
+ struct device *dev;
+ struct hnae_ae_dev ae_dev;
+
+ u8 __iomem *sc_base;
+ u8 __iomem *sds_base;
+ u8 __iomem *ppe_base;
+ u8 __iomem *io_base;
+ struct regmap *sub_ctrl;
+ phys_addr_t ppe_paddr;
+
+ u32 desc_num; /* desc num per queue*/
+ u32 buf_size; /* ring buffer size */
+ u32 reset_offset; /* reset field offset in sub sysctrl */
+ int buf_size_type; /* ring buffer size-type */
+ enum dsaf_mode dsaf_mode; /* dsaf mode */
+ enum hal_dsaf_mode dsaf_en;
+ enum hal_dsaf_tc_mode dsaf_tc_mode;
+ u32 dsaf_ver;
+ u16 tcam_max_num; /* max TCAM entry for user except promisc */
+
+ struct ppe_common_cb *ppe_common[DSAF_COMM_DEV_NUM];
+ struct rcb_common_cb *rcb_common[DSAF_COMM_DEV_NUM];
+ struct hns_mac_cb *mac_cb[DSAF_MAX_PORT_NUM];
+ struct dsaf_misc_op *misc_op;
+
+ struct dsaf_hw_stats hw_stats[DSAF_NODE_NUM];
+ struct dsaf_int_stat int_stat;
+	/* spinlock serializing tcam table configuration */
+ spinlock_t tcam_lock;
+};
+
+static inline void *hns_dsaf_dev_priv(const struct dsaf_device *dsaf_dev)
+{
+ return (void *)((u8 *)dsaf_dev + sizeof(*dsaf_dev));
+}
+
+#define DSAF_TBL_TCAM_KEY_PORT_S 0
+#define DSAF_TBL_TCAM_KEY_PORT_M (((1ULL << 4) - 1) << 0)
+#define DSAF_TBL_TCAM_KEY_VLAN_S 4
+#define DSAF_TBL_TCAM_KEY_VLAN_M (((1ULL << 12) - 1) << 4)
+
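+/* software view of a tcam key: the mac address plus the port/vlan field are
+ * packed into two 32-bit words, matching the hardware tcam data layout
+ */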
+struct dsaf_drv_tbl_tcam_key {
+ union {
+ struct {
+ u8 mac_3;
+ u8 mac_2;
+ u8 mac_1;
+ u8 mac_0;
+ } bits;
+
+ u32 val;
+ } high;
+ union {
+ struct {
+ u16 port_vlan;
+ u8 mac_5;
+ u8 mac_4;
+ } bits;
+
+ u32 val;
+ } low;
+};
+
+struct dsaf_drv_soft_mac_tbl {
+ struct dsaf_drv_tbl_tcam_key tcam_key;
+ u16 index; /*the entry's index in tcam tab*/
+};
+
+struct dsaf_drv_priv {
+	/* soft copy of the tcam mac keys, mirroring the hardware table */
+ struct dsaf_drv_soft_mac_tbl *soft_mac_tbl;
+};
+
+static inline void hns_dsaf_tbl_tcam_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_tcam_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_TCAM_ADDR_0_REG,
+ DSAF_TBL_TCAM_ADDR_M, DSAF_TBL_TCAM_ADDR_S,
+ tab_tcam_addr);
+}
+
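+/* pulse the tcam load bit (set then clear) to trigger a tcam load operation */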
+static inline void hns_dsaf_tbl_tcam_load_pul(struct dsaf_device *dsaf_dev)
+{
+ u32 o_tbl_pul;
+
+ o_tbl_pul = dsaf_read_dev(dsaf_dev, DSAF_TBL_PUL_0_REG);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 1);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+ dsaf_set_bit(o_tbl_pul, DSAF_TBL_PUL_TCAM_LOAD_S, 0);
+ dsaf_write_dev(dsaf_dev, DSAF_TBL_PUL_0_REG, o_tbl_pul);
+}
+
+static inline void hns_dsaf_tbl_line_addr_cfg(struct dsaf_device *dsaf_dev,
+ u32 tab_line_addr)
+{
+ dsaf_set_dev_field(dsaf_dev, DSAF_TBL_LINE_ADDR_0_REG,
+ DSAF_TBL_LINE_ADDR_M, DSAF_TBL_LINE_ADDR_S,
+ tab_line_addr);
+}
+
+static inline struct hnae_vf_cb *hns_ae_get_vf_cb(
+ struct hnae_handle *handle)
+{
+ return container_of(handle, struct hnae_vf_cb, ae_handle);
+}
+
+int hns_dsaf_set_mac_uc_entry(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_add_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+int hns_dsaf_del_mac_entry(struct dsaf_device *dsaf_dev, u16 vlan_id,
+ u8 in_port_num, u8 *addr);
+int hns_dsaf_del_mac_mc_port(struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+void hns_dsaf_fix_mac_mode(struct hns_mac_cb *mac_cb);
+
+int hns_dsaf_ae_init(struct dsaf_device *dsaf_dev);
+void hns_dsaf_ae_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_update_stats(struct dsaf_device *dsaf_dev, u32 inode_num);
+
+int hns_dsaf_get_sset_count(struct dsaf_device *dsaf_dev, int stringset);
+void hns_dsaf_get_stats(struct dsaf_device *ddev, u64 *data, int port);
+void hns_dsaf_get_strings(int stringset, u8 *data, int port,
+ struct dsaf_device *dsaf_dev);
+
+void hns_dsaf_get_regs(struct dsaf_device *ddev, u32 port, void *data);
+int hns_dsaf_get_regs_count(void);
+void hns_dsaf_set_promisc_mode(struct dsaf_device *dsaf_dev, u32 en);
+void hns_dsaf_set_promisc_tcam(struct dsaf_device *dsaf_dev,
+ u32 port, bool enable);
+
+void hns_dsaf_get_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 *en);
+int hns_dsaf_set_rx_mac_pause_en(struct dsaf_device *dsaf_dev, int mac_id,
+ u32 en);
+int hns_dsaf_rm_mac_addr(
+ struct dsaf_device *dsaf_dev,
+ struct dsaf_drv_mac_single_dest_entry *mac_entry);
+
+int hns_dsaf_clr_mac_mc_port(struct dsaf_device *dsaf_dev,
+ u8 mac_id, u8 port_num);
+int hns_dsaf_wait_pkt_clean(struct dsaf_device *dsaf_dev, int port);
+
+int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
+
+#endif /* __HNS_DSAF_MAIN_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
new file mode 100644
index 000000000..aa87e4d12
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c
@@ -0,0 +1,771 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_misc.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_reg.h"
+
+enum _dsm_op_index {
+ HNS_OP_RESET_FUNC = 0x1,
+ HNS_OP_SERDES_LP_FUNC = 0x2,
+ HNS_OP_LED_SET_FUNC = 0x3,
+ HNS_OP_GET_PORT_TYPE_FUNC = 0x4,
+ HNS_OP_GET_SFP_STAT_FUNC = 0x5,
+ HNS_OP_LOCATE_LED_SET_FUNC = 0x6,
+};
+
+enum _dsm_rst_type {
+ HNS_DSAF_RESET_FUNC = 0x1,
+ HNS_PPE_RESET_FUNC = 0x2,
+ HNS_XGE_RESET_FUNC = 0x4,
+ HNS_GE_RESET_FUNC = 0x5,
+ HNS_DSAF_CHN_RESET_FUNC = 0x6,
+ HNS_ROCE_RESET_FUNC = 0x7,
+};
+
+static const guid_t hns_dsaf_acpi_dsm_guid =
+ GUID_INIT(0x1A85AA1A, 0xE293, 0x415E,
+ 0x8E, 0x28, 0x8D, 0x69, 0x0A, 0x0F, 0x82, 0x0A);
+
+static void dsaf_write_sub(struct dsaf_device *dsaf_dev, u32 reg, u32 val)
+{
+ if (dsaf_dev->sub_ctrl)
+ dsaf_write_syscon(dsaf_dev->sub_ctrl, reg, val);
+ else
+ dsaf_write_reg(dsaf_dev->sc_base, reg, val);
+}
+
+static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg)
+{
+ u32 ret = 0;
+ int err;
+
+ if (dsaf_dev->sub_ctrl) {
+ err = dsaf_read_syscon(dsaf_dev->sub_ctrl, reg, &ret);
+ if (err)
+ dev_err(dsaf_dev->dev, "dsaf_read_syscon error %d!\n",
+ err);
+ } else {
+ ret = dsaf_read_reg(dsaf_dev->sc_base, reg);
+ }
+
+ return ret;
+}
+
+static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type,
+ u32 link, u32 port, u32 act)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = link;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = act;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n",
+ link, port, act);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_dsaf_acpi_locate_ledctrl_by_port(struct hns_mac_cb *mac_cb,
+ u8 op_type, u32 locate,
+ u32 port)
+{
+ union acpi_object obj_args[2], argv4;
+ union acpi_object *obj;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = locate;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_err(mac_cb->dev, "ledctrl fail, locate:%d port:%d!\n",
+ locate, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ int speed_reg = 0;
+ u8 value;
+
+ if (!mac_cb) {
+ pr_err("sfp_led_opt mac_dev is null!\n");
+ return;
+ }
+ if (!mac_cb->cpld_ctrl) {
+ dev_err(mac_cb->dev, "mac_id=%d, cpld syscon is null !\n",
+ mac_cb->mac_id);
+ return;
+ }
+
+ if (speed == MAC_SPEED_10000)
+ speed_reg = 1;
+
+ value = mac_cb->cpld_led_value;
+
+ if (link_status) {
+ dsaf_set_bit(value, DSAF_LED_LINK_B, link_status);
+ dsaf_set_field(value, DSAF_LED_SPEED_M,
+ DSAF_LED_SPEED_S, speed_reg);
+ dsaf_set_bit(value, DSAF_LED_DATA_B, data);
+
+ if (value != mac_cb->cpld_led_value) {
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
+ }
+ } else {
+ value = (mac_cb->cpld_led_value) & (0x1 << DSAF_LED_ANCHOR_B);
+ dsaf_write_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg, value);
+ mac_cb->cpld_led_value = value;
+ }
+}
+
+static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status,
+ u16 speed, int data)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_set mac_cb is null!\n");
+ return;
+ }
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ link_status, mac_cb->mac_id, data);
+}
+
+static void cpld_led_reset(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb || !mac_cb->cpld_ctrl)
+ return;
+
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ CPLD_LED_DEFAULT_VALUE);
+ mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE;
+}
+
+static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb)
+{
+ if (!mac_cb) {
+ pr_err("cpld_led_reset mac_cb is null!\n");
+ return;
+ }
+
+ if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER)
+ return;
+
+ hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC,
+ 0, mac_cb->mac_id, 0);
+}
+
+static int cpld_set_led_id(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return 0;
+
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ &val);
+ if (ret)
+ return ret;
+
+ dsaf_set_bit(val, DSAF_LED_ANCHOR_B, CPLD_LED_ON_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ val);
+ mac_cb->cpld_led_value = val;
+ break;
+ case HNAE_LED_INACTIVE:
+ dsaf_set_bit(mac_cb->cpld_led_value, DSAF_LED_ANCHOR_B,
+ CPLD_LED_DEFAULT_VALUE);
+ dsaf_write_syscon(mac_cb->cpld_ctrl, mac_cb->cpld_ctrl_reg,
+ mac_cb->cpld_led_value);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cpld_set_led_id_acpi(struct hns_mac_cb *mac_cb,
+ enum hnae_led_state status)
+{
+ switch (status) {
+ case HNAE_LED_ACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_ON_VALUE,
+ mac_cb->mac_id);
+ break;
+ case HNAE_LED_INACTIVE:
+ hns_dsaf_acpi_locate_ledctrl_by_port(mac_cb,
+ HNS_OP_LOCATE_LED_SET_FUNC,
+ CPLD_LED_DEFAULT_VALUE,
+ mac_cb->mac_id);
+ break;
+ default:
+ dev_err(mac_cb->dev, "invalid led state: %d!", status);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+#define RESET_REQ_OR_DREQ 1
+
+static void hns_dsaf_acpi_srst_by_port(struct dsaf_device *dsaf_dev, u8 op_type,
+ u32 port_type, u32 port, u32 val)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = port_type;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = port;
+ obj_args[2].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[2].integer.value = val;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 3;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(dsaf_dev->dev),
+ &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4);
+ if (!obj) {
+ dev_warn(dsaf_dev->dev, "reset port_type%d port%d fail!",
+ port_type, port);
+ return;
+ }
+
+ ACPI_FREE(obj);
+}
+
+static void hns_dsaf_rst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ u32 xbar_reg_addr;
+ u32 nt_reg_addr;
+
+ if (!dereset) {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_REQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_REQ_REG;
+ } else {
+ xbar_reg_addr = DSAF_SUB_SC_XBAR_RESET_DREQ_REG;
+ nt_reg_addr = DSAF_SUB_SC_NT_RESET_DREQ_REG;
+ }
+
+ dsaf_write_sub(dsaf_dev, xbar_reg_addr, RESET_REQ_OR_DREQ);
+ dsaf_write_sub(dsaf_dev, nt_reg_addr, RESET_REQ_OR_DREQ);
+}
+
+static void hns_dsaf_rst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_RESET_FUNC,
+ 0, dereset);
+}
+
+static void hns_dsaf_xge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ if (port >= DSAF_XGE_NUM)
+ return;
+
+ reg_val |= RESET_REQ_OR_DREQ;
+ reg_val |= 0x2082082 << dsaf_dev->mac_cb[port]->port_rst_off;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_XGE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_XGE_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_XGE_RESET_FUNC, port, dereset);
+}
+
+/**
+ * hns_dsaf_srst_chns - reset dsaf channels
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channel mask:
+ *       bit0-5 for xge0-5
+ *       bit6-11 for ppe0-5
+ *       bit12-17 for roce0-5
+ *       bit18-19 for com/dfx
+ * @dereset: false - request reset, true - de-assert reset
+ */
+static void
+hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+{
+ u32 reg_addr;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_DSAF_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, msk);
+}
+
+/**
+ * hns_dsaf_srst_chns_acpi - reset dsaf channels via the ACPI _DSM method
+ * @dsaf_dev: dsaf device struct pointer
+ * @msk: xbar channel mask:
+ *       bit0-5 for xge0-5
+ *       bit6-11 for ppe0-5
+ *       bit12-17 for roce0-5
+ *       bit18-19 for com/dfx
+ * @dereset: false - request reset, true - de-assert reset
+ */
+static void
+hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_DSAF_CHN_RESET_FUNC,
+ msk, dereset);
+}
+
+static void hns_dsaf_roce_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_RESET_REQ_REG, 1);
+ } else {
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_CLK_DIS_REG, 1);
+ dsaf_write_sub(dsaf_dev,
+ DSAF_SUB_SC_ROCEE_RESET_DREQ_REG, 1);
+ msleep(20);
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_ROCEE_CLK_EN_REG, 1);
+ }
+}
+
+static void hns_dsaf_roce_srst_acpi(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_ROCE_RESET_FUNC, 0, dereset);
+}
+
+static void hns_dsaf_ge_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
+{
+ u32 reg_val_1;
+ u32 reg_val_2;
+ u32 port_rst_off;
+
+ if (port >= DSAF_GE_NUM)
+ return;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+		/* DSAF_MAX_PORT_NUM is 6, but DSAF_GE_NUM is 8;
+		 * check the port index to prevent an array overflow
+		 */
+ if (port >= DSAF_MAX_PORT_NUM)
+ return;
+ reg_val_1 = 0x1 << port;
+ port_rst_off = dsaf_dev->mac_cb[port]->port_rst_off;
+ /* there is difference between V1 and V2 in register.*/
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ?
+ 0x1041041 : 0x2082082;
+ reg_val_2 <<= port_rst_off;
+
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ0_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ0_REG,
+ reg_val_2);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+ }
+ } else {
+ reg_val_1 = 0x15540;
+ reg_val_2 = AE_IS_VER1(dsaf_dev->dsaf_ver) ? 0x100 : 0x40;
+
+ reg_val_1 <<= dsaf_dev->reset_offset;
+ reg_val_2 <<= dsaf_dev->reset_offset;
+
+ if (!dereset) {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_REQ1_REG,
+ reg_val_1);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_REQ_REG,
+ reg_val_2);
+ } else {
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_GE_RESET_DREQ1_REG,
+ reg_val_1);
+
+ dsaf_write_sub(dsaf_dev, DSAF_SUB_SC_PPE_RESET_DREQ_REG,
+ reg_val_2);
+ }
+ }
+}
+
+static void hns_dsaf_ge_srst_by_port_acpi(struct dsaf_device *dsaf_dev,
+ u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_GE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_srst_by_port(struct dsaf_device *dsaf_dev, u32 port,
+ bool dereset)
+{
+ u32 reg_val = 0;
+ u32 reg_addr;
+
+ reg_val |= RESET_REQ_OR_DREQ << dsaf_dev->mac_cb[port]->port_rst_off;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+static void
+hns_ppe_srst_by_port_acpi(struct dsaf_device *dsaf_dev, u32 port, bool dereset)
+{
+ hns_dsaf_acpi_srst_by_port(dsaf_dev, HNS_OP_RESET_FUNC,
+ HNS_PPE_RESET_FUNC, port, dereset);
+}
+
+static void hns_ppe_com_srst(struct dsaf_device *dsaf_dev, bool dereset)
+{
+ u32 reg_val;
+ u32 reg_addr;
+
+ if (!(dev_of_node(dsaf_dev->dev)))
+ return;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev)) {
+ reg_val = RESET_REQ_OR_DREQ;
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG;
+
+ } else {
+ reg_val = 0x100 << dsaf_dev->reset_offset;
+
+ if (!dereset)
+ reg_addr = DSAF_SUB_SC_PPE_RESET_REQ_REG;
+ else
+ reg_addr = DSAF_SUB_SC_PPE_RESET_DREQ_REG;
+ }
+
+ dsaf_write_sub(dsaf_dev, reg_addr, reg_val);
+}
+
+/**
+ * hns_mac_get_phy_if - get the phy interface from the serdes mode
+ * @mac_cb: mac control block
+ * return phy interface
+ */
+static phy_interface_t hns_mac_get_phy_if(struct hns_mac_cb *mac_cb)
+{
+ u32 mode;
+ u32 reg;
+ bool is_ver1 = AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver);
+ int mac_id = mac_cb->mac_id;
+ phy_interface_t phy_if;
+
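+	/* the HILINK status register (read below) reports the serdes lane
+	 * mode; the bit at port_mode_off selects XGMII vs SGMII for this MAC
+	 */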
+ if (is_ver1) {
+ if (HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev))
+ return PHY_INTERFACE_MODE_SGMII;
+
+ if (mac_id >= 0 && mac_id <= 3)
+ reg = HNS_MAC_HILINK4_REG;
+ else
+ reg = HNS_MAC_HILINK3_REG;
+ } else{
+ if (!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev) && mac_id <= 3)
+ reg = HNS_MAC_HILINK4V2_REG;
+ else
+ reg = HNS_MAC_HILINK3V2_REG;
+ }
+
+ mode = dsaf_read_sub(mac_cb->dsaf_dev, reg);
+ if (dsaf_get_bit(mode, mac_cb->port_mode_off))
+ phy_if = PHY_INTERFACE_MODE_XGMII;
+ else
+ phy_if = PHY_INTERFACE_MODE_SGMII;
+
+ return phy_if;
+}
+
+static phy_interface_t hns_mac_get_phy_if_acpi(struct hns_mac_cb *mac_cb)
+{
+ phy_interface_t phy_if = PHY_INTERFACE_MODE_NA;
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE,
+ argv4.package.count = 1,
+ argv4.package.elements = &obj_args,
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_PORT_TYPE_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return phy_if;
+
+ phy_if = obj->integer.value ?
+ PHY_INTERFACE_MODE_XGMII : PHY_INTERFACE_MODE_SGMII;
+
+ dev_dbg(mac_cb->dev, "mac_id=%d, phy_if=%d\n", mac_cb->mac_id, phy_if);
+
+ ACPI_FREE(obj);
+
+ return phy_if;
+}
+
+static int hns_mac_get_sfp_prsnt(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ u32 val = 0;
+ int ret;
+
+ if (!mac_cb->cpld_ctrl)
+ return -ENODEV;
+
+ ret = dsaf_read_syscon(mac_cb->cpld_ctrl,
+ mac_cb->cpld_ctrl_reg + MAC_SFP_PORT_OFFSET,
+ &val);
+ if (ret)
+ return ret;
+
+ *sfp_prsnt = !val;
+ return 0;
+}
+
+static int hns_mac_get_sfp_prsnt_acpi(struct hns_mac_cb *mac_cb, int *sfp_prsnt)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args, argv4;
+
+ obj_args.integer.type = ACPI_TYPE_INTEGER;
+ obj_args.integer.value = mac_cb->mac_id;
+
+ argv4.type = ACPI_TYPE_PACKAGE,
+ argv4.package.count = 1,
+ argv4.package.elements = &obj_args,
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_GET_SFP_STAT_FUNC, &argv4);
+
+ if (!obj || obj->type != ACPI_TYPE_INTEGER)
+ return -ENODEV;
+
+ *sfp_prsnt = obj->integer.value;
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+/**
+ * hns_mac_config_sds_loopback - set serdes loopback
+ * @mac_cb: mac control block
+ * @en: enable or disable
+ * return 0 on success
+ */
+static int hns_mac_config_sds_loopback(struct hns_mac_cb *mac_cb, bool en)
+{
+ const u8 lane_id[] = {
+ 0, /* mac 0 -> lane 0 */
+ 1, /* mac 1 -> lane 1 */
+ 2, /* mac 2 -> lane 2 */
+ 3, /* mac 3 -> lane 3 */
+ 2, /* mac 4 -> lane 2 */
+ 3, /* mac 5 -> lane 3 */
+ 0, /* mac 6 -> lane 0 */
+ 1 /* mac 7 -> lane 1 */
+ };
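+	/* RX_CSR builds the serdes RX CSR offset for a lane: base 0x4080 plus
+	 * a 0x2 per-register stride and a 0x200 per-lane stride, with the sum
+	 * doubled (presumably converting a 16-bit word index to a byte offset)
+	 */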
+#define RX_CSR(lane, reg) ((0x4080 + (reg) * 0x0002 + (lane) * 0x0200) * 2)
+ u64 reg_offset = RX_CSR(lane_id[mac_cb->mac_id], 0);
+
+ int sfp_prsnt = 0;
+ int ret = hns_mac_get_sfp_prsnt(mac_cb, &sfp_prsnt);
+
+	if (!mac_cb->phy_dev) {
+		if (ret)
+			pr_info("please confirm whether the sfp is present\n");
+		else if (!sfp_prsnt)
+			pr_info("no sfp on this port\n");
+	}
+
+ if (mac_cb->serdes_ctrl) {
+ u32 origin = 0;
+
+ if (!AE_IS_VER1(mac_cb->dsaf_dev->dsaf_ver)) {
+#define HILINK_ACCESS_SEL_CFG 0x40008
+			/* hilink4 & hilink3 share the same xge training and
+			 * xge u adaptor; the hilink access sel cfg register
+			 * selects which one is configured
+			 */
+ if ((!HNS_DSAF_IS_DEBUG(mac_cb->dsaf_dev)) &&
+ (mac_cb->mac_id <= 3))
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 0);
+ else
+ dsaf_write_syscon(mac_cb->serdes_ctrl,
+ HILINK_ACCESS_SEL_CFG, 3);
+ }
+
+ ret = dsaf_read_syscon(mac_cb->serdes_ctrl, reg_offset,
+ &origin);
+ if (ret)
+ return ret;
+
+ dsaf_set_field(origin, 1ull << 10, 10, en);
+ dsaf_write_syscon(mac_cb->serdes_ctrl, reg_offset, origin);
+ } else {
+ u8 __iomem *base_addr = mac_cb->serdes_vaddr +
+ (mac_cb->mac_id <= 3 ? 0x00280000 : 0x00200000);
+ dsaf_set_reg_field(base_addr, reg_offset, 1ull << 10, 10, en);
+ }
+
+ return 0;
+}
+
+static int
+hns_mac_config_sds_loopback_acpi(struct hns_mac_cb *mac_cb, bool en)
+{
+ union acpi_object *obj;
+ union acpi_object obj_args[3], argv4;
+
+ obj_args[0].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[0].integer.value = mac_cb->mac_id;
+ obj_args[1].integer.type = ACPI_TYPE_INTEGER;
+ obj_args[1].integer.value = !!en;
+
+ argv4.type = ACPI_TYPE_PACKAGE;
+ argv4.package.count = 2;
+ argv4.package.elements = obj_args;
+
+ obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dsaf_dev->dev),
+ &hns_dsaf_acpi_dsm_guid, 0,
+ HNS_OP_SERDES_LP_FUNC, &argv4);
+ if (!obj) {
+ dev_warn(mac_cb->dsaf_dev->dev, "set port%d serdes lp fail!",
+ mac_cb->mac_id);
+
+ return -ENOTSUPP;
+ }
+
+ ACPI_FREE(obj);
+
+ return 0;
+}
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev)
+{
+ struct dsaf_misc_op *misc_op;
+
+ misc_op = devm_kzalloc(dsaf_dev->dev, sizeof(*misc_op), GFP_KERNEL);
+ if (!misc_op)
+ return NULL;
+
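+	/* device-tree platforms get the direct register-access callbacks,
+	 * ACPI platforms get the _DSM-based ones; anything else is unsupported
+	 */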
+ if (dev_of_node(dsaf_dev->dev)) {
+ misc_op->cpld_set_led = hns_cpld_set_led;
+ misc_op->cpld_reset_led = cpld_led_reset;
+ misc_op->cpld_set_led_id = cpld_set_led_id;
+
+ misc_op->dsaf_reset = hns_dsaf_rst;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port;
+ misc_op->ppe_srst = hns_ppe_srst_by_port;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback;
+ } else if (is_acpi_node(dsaf_dev->dev->fwnode)) {
+ misc_op->cpld_set_led = hns_cpld_set_led_acpi;
+ misc_op->cpld_reset_led = cpld_led_reset_acpi;
+ misc_op->cpld_set_led_id = cpld_set_led_id_acpi;
+
+ misc_op->dsaf_reset = hns_dsaf_rst_acpi;
+ misc_op->xge_srst = hns_dsaf_xge_srst_by_port_acpi;
+ misc_op->ge_srst = hns_dsaf_ge_srst_by_port_acpi;
+ misc_op->ppe_srst = hns_ppe_srst_by_port_acpi;
+ misc_op->ppe_comm_srst = hns_ppe_com_srst;
+ misc_op->hns_dsaf_srst_chns = hns_dsaf_srst_chns_acpi;
+ misc_op->hns_dsaf_roce_srst = hns_dsaf_roce_srst_acpi;
+
+ misc_op->get_phy_if = hns_mac_get_phy_if_acpi;
+ misc_op->get_sfp_prsnt = hns_mac_get_sfp_prsnt_acpi;
+
+ misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback_acpi;
+ } else {
+ devm_kfree(dsaf_dev->dev, (void *)misc_op);
+ misc_op = NULL;
+ }
+
+ return (void *)misc_op;
+}
+
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode)
+{
+ struct device *dev;
+
+ dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode);
+ return dev ? to_platform_device(dev) : NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
new file mode 100644
index 000000000..f64c6667d
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_MISC_H
+#define _HNS_DSAF_MISC_H
+
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_mac.h"
+
+#define CPLD_ADDR_PORT_OFFSET 0x4
+
+#define HS_LED_ON 0xE
+#define HS_LED_OFF 0xF
+
+#define CPLD_LED_ON_VALUE 1
+#define CPLD_LED_DEFAULT_VALUE 0
+
+#define MAC_SFP_PORT_OFFSET 0x2
+
+#define DSAF_LED_SPEED_S 0
+#define DSAF_LED_SPEED_M (0x3 << DSAF_LED_SPEED_S)
+
+#define DSAF_LED_LINK_B 2
+#define DSAF_LED_DATA_B 4
+#define DSAF_LED_ANCHOR_B 5
+
+struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev);
+struct
+platform_device *hns_dsaf_find_platform_device(struct fwnode_handle *fwnode);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
new file mode 100644
index 000000000..d0f8b1fff
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+
+#include "hns_dsaf_ppe.h"
+
+void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+ dsaf_set_dev_bit(ppe_cb, PPEV2_CFG_TSO_EN_REG, 0, !!value);
+}
+
+void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM])
+{
+ u32 key_item;
+
+ for (key_item = 0; key_item < HNS_PPEV2_RSS_KEY_NUM; key_item++)
+ dsaf_write_dev(ppe_cb, PPEV2_RSS_KEY_REG + key_item * 0x4,
+ rss_key[key_item]);
+}
+
+void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE])
+{
+ int i;
+ int reg_value;
+
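+	/* each 32-bit indirection register packs four 5-bit table entries,
+	 * hence the division by 4 and the 0x1F masks below
+	 */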
+ for (i = 0; i < (HNS_PPEV2_RSS_IND_TBL_SIZE / 4); i++) {
+ reg_value = dsaf_read_dev(ppe_cb,
+ PPEV2_INDRECTION_TBL_REG + i * 0x4);
+
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N0_M,
+ PPEV2_CFG_RSS_TBL_4N0_S,
+ rss_tab[i * 4 + 0] & 0x1F);
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N1_M,
+ PPEV2_CFG_RSS_TBL_4N1_S,
+ rss_tab[i * 4 + 1] & 0x1F);
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N2_M,
+ PPEV2_CFG_RSS_TBL_4N2_S,
+ rss_tab[i * 4 + 2] & 0x1F);
+ dsaf_set_field(reg_value, PPEV2_CFG_RSS_TBL_4N3_M,
+ PPEV2_CFG_RSS_TBL_4N3_S,
+ rss_tab[i * 4 + 3] & 0x1F);
+ dsaf_write_dev(
+ ppe_cb, PPEV2_INDRECTION_TBL_REG + i * 0x4, reg_value);
+ }
+}
+
+static u8 __iomem *
+hns_ppe_common_get_ioaddr(struct ppe_common_cb *ppe_common)
+{
+ return ppe_common->dsaf_dev->ppe_base + PPE_COMMON_REG_OFFSET;
+}
+
+/**
+ * hns_ppe_common_get_cfg - get ppe common config
+ * @dsaf_dev: dsaf device
+ * @comm_index: common index
+ * return 0 on success, negative on failure
+ */
+static int hns_ppe_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index)
+{
+ struct ppe_common_cb *ppe_common;
+ int ppe_num;
+
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ ppe_num = HNS_PPE_SERVICE_NW_ENGINE_NUM;
+ else
+ ppe_num = HNS_PPE_DEBUG_NW_ENGINE_NUM;
+
+ ppe_common = devm_kzalloc(dsaf_dev->dev,
+ struct_size(ppe_common, ppe_cb, ppe_num),
+ GFP_KERNEL);
+ if (!ppe_common)
+ return -ENOMEM;
+
+ ppe_common->ppe_num = ppe_num;
+ ppe_common->dsaf_dev = dsaf_dev;
+ ppe_common->comm_index = comm_index;
+ if (!HNS_DSAF_IS_DEBUG(dsaf_dev))
+ ppe_common->ppe_mode = PPE_COMMON_MODE_SERVICE;
+ else
+ ppe_common->ppe_mode = PPE_COMMON_MODE_DEBUG;
+ ppe_common->dev = dsaf_dev->dev;
+
+ ppe_common->io_base = hns_ppe_common_get_ioaddr(ppe_common);
+
+ dsaf_dev->ppe_common[comm_index] = ppe_common;
+
+ return 0;
+}
+
+static void
+hns_ppe_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index)
+{
+ dsaf_dev->ppe_common[comm_index] = NULL;
+}
+
+static u8 __iomem *hns_ppe_get_iobase(struct ppe_common_cb *ppe_common,
+ int ppe_idx)
+{
+ return ppe_common->dsaf_dev->ppe_base + ppe_idx * PPE_REG_OFFSET;
+}
+
+static void hns_ppe_get_cfg(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+ struct hns_ppe_cb *ppe_cb;
+ u32 ppe_num = ppe_common->ppe_num;
+
+ for (i = 0; i < ppe_num; i++) {
+ ppe_cb = &ppe_common->ppe_cb[i];
+ ppe_cb->dev = ppe_common->dev;
+ ppe_cb->next = NULL;
+ ppe_cb->ppe_common_cb = ppe_common;
+ ppe_cb->index = i;
+ ppe_cb->io_base = hns_ppe_get_iobase(ppe_common, i);
+ ppe_cb->virq = 0;
+ }
+}
+
+static void hns_ppe_cnt_clr_ce(struct hns_ppe_cb *ppe_cb)
+{
+ dsaf_set_dev_bit(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG,
+ PPE_CNT_CLR_CE_B, 1);
+}
+
+static void hns_ppe_set_vlan_strip(struct hns_ppe_cb *ppe_cb, int en)
+{
+ dsaf_write_dev(ppe_cb, PPEV2_VLAN_STRIP_EN_REG, en);
+}
+
+/**
+ * hns_ppe_checksum_hw - configure ppe checksum calculation
+ * @ppe_cb: ppe device
+ * @value: protocol check enable bits
+ */
+static void hns_ppe_checksum_hw(struct hns_ppe_cb *ppe_cb, u32 value)
+{
+ dsaf_set_dev_field(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG,
+ 0xfffffff, 0, value);
+}
+
+static void hns_ppe_set_qid_mode(struct ppe_common_cb *ppe_common,
+				 enum ppe_qid_mode qid_mode)
+{
+	dsaf_set_dev_field(ppe_common, PPE_COM_CFG_QID_MODE_REG,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_M,
+			   PPE_CFG_QID_MODE_CF_QID_MODE_S, qid_mode);
+}
+
+/**
+ * hns_ppe_set_qid - set ppe qid
+ * @ppe_common: ppe common device
+ * @qid: queue id
+ */
+static void hns_ppe_set_qid(struct ppe_common_cb *ppe_common, u32 qid)
+{
+ u32 qid_mod = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+
+ if (!dsaf_get_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S)) {
+ dsaf_set_field(qid_mod, PPE_CFG_QID_MODE_DEF_QID_M,
+ PPE_CFG_QID_MODE_DEF_QID_S, qid);
+ dsaf_write_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG, qid_mod);
+ }
+}
+
+/**
+ * hns_ppe_set_port_mode - set port mode
+ * @ppe_cb: ppe device
+ * @mode: port mode
+ */
+static void hns_ppe_set_port_mode(struct hns_ppe_cb *ppe_cb,
+ enum ppe_port_mode mode)
+{
+ dsaf_write_dev(ppe_cb, PPE_CFG_XGE_MODE_REG, mode);
+}
+
+/**
+ * hns_ppe_common_init_hw - init ppe common device
+ * @ppe_common: ppe common device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_ppe_common_init_hw(struct ppe_common_cb *ppe_common)
+{
+ enum ppe_qid_mode qid_mode;
+ struct dsaf_device *dsaf_dev = ppe_common->dsaf_dev;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 0);
+ msleep(100);
+ dsaf_dev->misc_op->ppe_comm_srst(dsaf_dev, 1);
+ msleep(100);
+
+ if (ppe_common->ppe_mode == PPE_COMMON_MODE_SERVICE) {
+ switch (dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_FIX:
+ qid_mode = PPE_QID_MODE0;
+ hns_ppe_set_qid(ppe_common, 0);
+ break;
+ case DSAF_MODE_ENABLE_0VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ qid_mode = PPE_QID_MODE3;
+ break;
+ case DSAF_MODE_ENABLE_8VM:
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ qid_mode = PPE_QID_MODE4;
+ break;
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ qid_mode = PPE_QID_MODE5;
+ break;
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ qid_mode = PPE_QID_MODE2;
+ break;
+ case DSAF_MODE_ENABLE_128VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ qid_mode = PPE_QID_MODE1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ qid_mode = PPE_QID_MODE7;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ qid_mode = PPE_QID_MODE6;
+ break;
+ default:
+ dev_err(ppe_common->dev,
+ "get ppe queue mode failed! dsaf_mode=%d\n",
+ dsaf_mode);
+ return -EINVAL;
+ }
+ hns_ppe_set_qid_mode(ppe_common, qid_mode);
+ }
+
+ dsaf_set_dev_bit(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG,
+ PPE_COMMON_CNT_CLR_CE_B, 1);
+
+ return 0;
+}
+
+/* clear status and enable or mask the ppe exception irqs */
+static void hns_ppe_exc_irq_en(struct hns_ppe_cb *ppe_cb, int en)
+{
+	u32 clr_value = 0xfffffffful;
+	u32 msk_value = en ? 0xfffffffful : 0; /* 1 is en, 0 is dis */
+	u32 vld_msk = 0;
+
+	/* only bits 0, 1 and 7 are used */
+	dsaf_set_bit(vld_msk, 0, 1);
+	dsaf_set_bit(vld_msk, 1, 1);
+	dsaf_set_bit(vld_msk, 7, 1);
+
+	/* clear status */
+	dsaf_write_dev(ppe_cb, PPE_RINT_REG, clr_value);
+
+	/* mask with vld_msk so the reserved bits stay cleared */
+	dsaf_write_dev(ppe_cb, PPE_INTEN_REG, msk_value & vld_msk);
+}
+
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb)
+{
+ int wait_cnt;
+ u32 val;
+
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ val = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG) & 0x3ffU;
+ if (!val)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(ppe_cb->dev, "hns ppe tx fifo clean wait timeout, still has %u pkt.\n",
+ val);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_ppe_init_hw - init ppe
+ * @ppe_cb: ppe device
+ */
+static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
+{
+ struct ppe_common_cb *ppe_common_cb = ppe_cb->ppe_common_cb;
+ u32 port = ppe_cb->index;
+ struct dsaf_device *dsaf_dev = ppe_common_cb->dsaf_dev;
+ int i;
+
+ /* get default RSS key */
+ netdev_rss_key_fill(ppe_cb->rss_key, HNS_PPEV2_RSS_KEY_SIZE);
+
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
+ mdelay(10);
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 1);
+
+ /* clr and msk except irq*/
+ hns_ppe_exc_irq_en(ppe_cb, 0);
+
+ if (ppe_common_cb->ppe_mode == PPE_COMMON_MODE_DEBUG) {
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_GE);
+ dsaf_write_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG, 0);
+ } else {
+ hns_ppe_set_port_mode(ppe_cb, PPE_MODE_XGE);
+ }
+
+ hns_ppe_checksum_hw(ppe_cb, 0xffffffff);
+ hns_ppe_cnt_clr_ce(ppe_cb);
+
+ if (!AE_IS_VER1(dsaf_dev->dsaf_ver)) {
+ hns_ppe_set_vlan_strip(ppe_cb, 0);
+
+ dsaf_write_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG,
+ HNS_PPEV2_MAX_FRAME_LEN);
+
+ /* set default RSS key in h/w */
+ hns_ppe_set_rss_key(ppe_cb, ppe_cb->rss_key);
+
+ /* Set default indrection table in h/w */
+ for (i = 0; i < HNS_PPEV2_RSS_IND_TBL_SIZE; i++)
+ ppe_cb->rss_indir_table[i] = i;
+ hns_ppe_set_indir_table(ppe_cb, ppe_cb->rss_indir_table);
+ }
+}
+
+/**
+ * hns_ppe_uninit_hw - uninit ppe
+ * @ppe_cb: ppe device
+ */
+static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
+{
+ u32 port;
+
+ if (ppe_cb->ppe_common_cb) {
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
+ port = ppe_cb->index;
+ dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
+ }
+}
+
+static void hns_ppe_uninit_ex(struct ppe_common_cb *ppe_common)
+{
+ u32 i;
+
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ if (ppe_common->dsaf_dev->mac_cb[i])
+ hns_ppe_uninit_hw(&ppe_common->ppe_cb[i]);
+ memset(&ppe_common->ppe_cb[i], 0, sizeof(struct hns_ppe_cb));
+ }
+}
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev)
+{
+ u32 i;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ if (dsaf_dev->ppe_common[i])
+ hns_ppe_uninit_ex(dsaf_dev->ppe_common[i]);
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+ }
+}
+
+/**
+ * hns_ppe_reset_common - reinit ppe/rcb hw
+ * @dsaf_dev: dsaf device
+ * @ppe_common_index: the ppe common index
+ */
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index)
+{
+ u32 i;
+ int ret;
+ struct ppe_common_cb *ppe_common;
+
+ ppe_common = dsaf_dev->ppe_common[ppe_common_index];
+ ret = hns_ppe_common_init_hw(ppe_common);
+ if (ret)
+ return;
+
+ for (i = 0; i < ppe_common->ppe_num; i++) {
+ /* We only need to initiate ppe when the port exists */
+ if (dsaf_dev->mac_cb[i])
+ hns_ppe_init_hw(&ppe_common->ppe_cb[i]);
+ }
+
+ ret = hns_rcb_common_init_hw(dsaf_dev->rcb_common[ppe_common_index]);
+ if (ret)
+ return;
+
+ hns_rcb_common_init_commit_hw(dsaf_dev->rcb_common[ppe_common_index]);
+}
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb)
+{
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ hw_stats->rx_pkts_from_sw
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ hw_stats->rx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ hw_stats->rx_drop_no_bd
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ hw_stats->rx_alloc_buf_fail
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ hw_stats->rx_alloc_buf_wait
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ hw_stats->rx_drop_no_buf
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ hw_stats->rx_err_fifo_full
+ += dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ hw_stats->tx_bd_form_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ hw_stats->tx_pkts_from_rcb
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ hw_stats->tx_pkts
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ hw_stats->tx_err_fifo_empty
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ hw_stats->tx_err_checksum
+ += dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+}
+
+int hns_ppe_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return ETH_PPE_STATIC_NUM;
+ return 0;
+}
+
+int hns_ppe_get_regs_count(void)
+{
+ return ETH_PPE_DUMP_NUM;
+}
+
+/**
+ * hns_ppe_get_strings - get ppe statistics strings
+ * @ppe_cb: ppe device
+ * @stringset: string set type
+ * @data: output string buffer
+ */
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ int index = ppe_cb->index;
+
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_sw_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_ok", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_drop_pkt_no_bd", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_fail", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_alloc_buf_wait", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_drop_no_buf", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_rx_pkt_err_fifo_full", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_bd", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_ok", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_fifo_empty", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "ppe%d_tx_pkt_err_csum_fail", index);
+}
+
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct hns_ppe_hw_stats *hw_stats = &ppe_cb->hw_stats;
+
+ regs_buff[0] = hw_stats->rx_pkts_from_sw;
+ regs_buff[1] = hw_stats->rx_pkts;
+ regs_buff[2] = hw_stats->rx_drop_no_bd;
+ regs_buff[3] = hw_stats->rx_alloc_buf_fail;
+ regs_buff[4] = hw_stats->rx_alloc_buf_wait;
+ regs_buff[5] = hw_stats->rx_drop_no_buf;
+ regs_buff[6] = hw_stats->rx_err_fifo_full;
+
+ regs_buff[7] = hw_stats->tx_bd_form_rcb;
+ regs_buff[8] = hw_stats->tx_pkts_from_rcb;
+ regs_buff[9] = hw_stats->tx_pkts;
+ regs_buff[10] = hw_stats->tx_err_fifo_empty;
+ regs_buff[11] = hw_stats->tx_err_checksum;
+}
+
+/**
+ * hns_ppe_init - init ppe device
+ * @dsaf_dev: dsaf device
+ * return 0 on success, negative on failure
+ */
+int hns_ppe_init(struct dsaf_device *dsaf_dev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ ret = hns_ppe_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_cfg_fail;
+
+ ret = hns_rcb_common_get_cfg(dsaf_dev, i);
+ if (ret)
+ goto get_cfg_fail;
+
+ hns_ppe_get_cfg(dsaf_dev->ppe_common[i]);
+
+ ret = hns_rcb_get_cfg(dsaf_dev->rcb_common[i]);
+ if (ret)
+ goto get_cfg_fail;
+ }
+
+ for (i = 0; i < HNS_PPE_COM_NUM; i++)
+ hns_ppe_reset_common(dsaf_dev, i);
+
+ return 0;
+
+get_cfg_fail:
+ for (i = 0; i < HNS_PPE_COM_NUM; i++) {
+ hns_rcb_common_free_cfg(dsaf_dev, i);
+ hns_ppe_common_free_cfg(dsaf_dev, i);
+ }
+
+ return ret;
+}
+
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data)
+{
+ struct ppe_common_cb *ppe_common = ppe_cb->ppe_common_cb;
+ u32 *regs = data;
+ u32 i;
+ u32 offset;
+
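+	/* layout: regs[0..4] hold the common config registers, followed by
+	 * four per-queue counter blocks of DSAF_TOTAL_QUEUE_NUM entries each,
+	 * end markers at regs[521..523], then the per-channel registers from
+	 * regs[525] onwards
+	 */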
+ /* ppe common registers */
+ regs[0] = dsaf_read_dev(ppe_common, PPE_COM_CFG_QID_MODE_REG);
+ regs[1] = dsaf_read_dev(ppe_common, PPE_COM_INTEN_REG);
+ regs[2] = dsaf_read_dev(ppe_common, PPE_COM_RINT_REG);
+ regs[3] = dsaf_read_dev(ppe_common, PPE_COM_INTSTS_REG);
+ regs[4] = dsaf_read_dev(ppe_common, PPE_COM_COMMON_CNT_CLR_CE_REG);
+
+ for (i = 0; i < DSAF_TOTAL_QUEUE_NUM; i++) {
+ offset = PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 0x4 * i;
+ regs[5 + i] = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 2]
+ = dsaf_read_dev(ppe_common, offset);
+ offset = PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 0x4 * i;
+ regs[5 + i + DSAF_TOTAL_QUEUE_NUM * 3]
+ = dsaf_read_dev(ppe_common, offset);
+ }
+
+ /* mark end of ppe regs */
+ for (i = 521; i < 524; i++)
+ regs[i] = 0xeeeeeeee;
+
+ /* ppe channel registers */
+ regs[525] = dsaf_read_dev(ppe_cb, PPE_CFG_TX_FIFO_THRSLD_REG);
+ regs[526] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_THRSLD_REG);
+ regs[527] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG);
+ regs[528] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG);
+ regs[529] = dsaf_read_dev(ppe_cb, PPE_CFG_PAUSE_IDLE_CNT_REG);
+ regs[530] = dsaf_read_dev(ppe_cb, PPE_CFG_BUS_CTRL_REG);
+ regs[531] = dsaf_read_dev(ppe_cb, PPE_CFG_TNL_TO_BE_RST_REG);
+ regs[532] = dsaf_read_dev(ppe_cb, PPE_CURR_TNL_CAN_RST_REG);
+
+ regs[533] = dsaf_read_dev(ppe_cb, PPE_CFG_XGE_MODE_REG);
+ regs[534] = dsaf_read_dev(ppe_cb, PPE_CFG_MAX_FRAME_LEN_REG);
+ regs[535] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_MODE_REG);
+ regs[536] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_VLAN_TAG_REG);
+ regs[537] = dsaf_read_dev(ppe_cb, PPE_CFG_TAG_GEN_REG);
+ regs[538] = dsaf_read_dev(ppe_cb, PPE_CFG_PARSE_TAG_REG);
+ regs[539] = dsaf_read_dev(ppe_cb, PPE_CFG_PRO_CHECK_EN_REG);
+
+ regs[540] = dsaf_read_dev(ppe_cb, PPE_INTEN_REG);
+ regs[541] = dsaf_read_dev(ppe_cb, PPE_RINT_REG);
+ regs[542] = dsaf_read_dev(ppe_cb, PPE_INTSTS_REG);
+ regs[543] = dsaf_read_dev(ppe_cb, PPE_CFG_RX_PKT_INT_REG);
+
+ regs[544] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME0_REG);
+ regs[545] = dsaf_read_dev(ppe_cb, PPE_CFG_HEAT_DECT_TIME1_REG);
+
+ /* ppe static */
+ regs[546] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_SW_PKT_CNT_REG);
+ regs[547] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG);
+ regs[548] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_NO_BUF_CNT_REG);
+ regs[549] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_BD_CNT_REG);
+ regs[550] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CNT_REG);
+ regs[551] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_OK_CNT_REG);
+ regs[552] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_EPT_CNT_REG);
+ regs[553] = dsaf_read_dev(ppe_cb, PPE_HIS_TX_PKT_CS_FAIL_CNT_REG);
+ regs[554] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_FAIL_CNT_REG);
+ regs[555] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_APP_BUF_WAIT_CNT_REG);
+ regs[556] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_FUL_CNT_REG);
+ regs[557] = dsaf_read_dev(ppe_cb, PPE_HIS_RX_PKT_DROP_PRT_CNT_REG);
+
+ regs[558] = dsaf_read_dev(ppe_cb, PPE_TNL_0_5_CNT_CLR_CE_REG);
+ regs[559] = dsaf_read_dev(ppe_cb, PPE_CFG_AXI_DBG_REG);
+ regs[560] = dsaf_read_dev(ppe_cb, PPE_HIS_PRO_ERR_REG);
+ regs[561] = dsaf_read_dev(ppe_cb, PPE_HIS_TNL_FIFO_ERR_REG);
+ regs[562] = dsaf_read_dev(ppe_cb, PPE_CURR_CFF_DATA_NUM_REG);
+ regs[563] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_ST_REG);
+ regs[564] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_ST_REG);
+ regs[565] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO0_REG);
+ regs[566] = dsaf_read_dev(ppe_cb, PPE_CURR_RX_FIFO1_REG);
+ regs[567] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO0_REG);
+ regs[568] = dsaf_read_dev(ppe_cb, PPE_CURR_TX_FIFO1_REG);
+ regs[569] = dsaf_read_dev(ppe_cb, PPE_ECO0_REG);
+ regs[570] = dsaf_read_dev(ppe_cb, PPE_ECO1_REG);
+ regs[571] = dsaf_read_dev(ppe_cb, PPE_ECO2_REG);
+
+ /* mark end of ppe regs */
+ for (i = 572; i < 576; i++)
+ regs[i] = 0xeeeeeeee;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
new file mode 100644
index 000000000..0f0e16f9a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.h
@@ -0,0 +1,119 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_PPE_H
+#define _HNS_DSAF_PPE_H
+
+#include <linux/platform_device.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_rcb.h"
+
+#define HNS_PPE_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_PPE_DEBUG_NW_ENGINE_NUM 1
+#define HNS_PPE_COM_NUM DSAF_COMM_DEV_NUM
+
+#define PPE_COMMON_REG_OFFSET 0x70000
+#define PPE_REG_OFFSET 0x10000
+
+#define ETH_PPE_DUMP_NUM 576
+#define ETH_PPE_STATIC_NUM 12
+
+#define HNS_PPEV2_RSS_IND_TBL_SIZE 256
+#define HNS_PPEV2_RSS_KEY_SIZE 40 /* in bytes or 320 bits */
+#define HNS_PPEV2_RSS_KEY_NUM (HNS_PPEV2_RSS_KEY_SIZE / sizeof(u32))
+
+#define HNS_PPEV2_MAX_FRAME_LEN		0x980
+
+enum ppe_qid_mode {
+ PPE_QID_MODE0 = 0, /* fixed queue id mode */
+ PPE_QID_MODE1, /* switch:128VM non switch:6Port/4VM/4TC */
+ PPE_QID_MODE2, /* switch:32VM/4TC non switch:6Port/16VM */
+ PPE_QID_MODE3, /* switch:4TC/8RSS non switch:2Port/64VM */
+ PPE_QID_MODE4, /* switch:8VM/16RSS non switch:2Port/16VM/4TC */
+ PPE_QID_MODE5, /* switch:16VM/8TC non switch:6Port/16RSS */
+ PPE_QID_MODE6, /* switch:32VM/4RSS non switch:6Port/2VM/8TC */
+ PPE_QID_MODE7, /* switch:32RSS non switch:2Port/8VM/8TC */
+ PPE_QID_MODE8, /* switch:6VM/4TC/4RSS non switch:2Port/16VM/4RSS */
+ PPE_QID_MODE9, /* non switch:2Port/32VM/2RSS */
+ PPE_QID_MODE10, /* non switch:2Port/32RSS */
+ PPE_QID_MODE11, /* non switch:2Port/4TC/16RSS */
+};
+
+enum ppe_port_mode {
+ PPE_MODE_GE = 0,
+ PPE_MODE_XGE,
+};
+
+enum ppe_common_mode {
+ PPE_COMMON_MODE_DEBUG = 0,
+ PPE_COMMON_MODE_SERVICE,
+ PPE_COMMON_MODE_MAX
+};
+
+struct hns_ppe_hw_stats {
+ u64 rx_pkts_from_sw;
+ u64 rx_pkts;
+ u64 rx_drop_no_bd;
+ u64 rx_alloc_buf_fail;
+ u64 rx_alloc_buf_wait;
+ u64 rx_drop_no_buf;
+ u64 rx_err_fifo_full;
+ u64 tx_bd_form_rcb;
+ u64 tx_pkts_from_rcb;
+ u64 tx_pkts;
+ u64 tx_err_fifo_empty;
+ u64 tx_err_checksum;
+};
+
+struct hns_ppe_cb {
+ struct device *dev;
+ struct hns_ppe_cb *next; /* pointer to next ppe device */
+ struct ppe_common_cb *ppe_common_cb; /* belong to */
+ struct hns_ppe_hw_stats hw_stats;
+
+ u8 index; /* index in a ppe common device */
+ u8 __iomem *io_base;
+ int virq;
+	u32 rss_indir_table[HNS_PPEV2_RSS_IND_TBL_SIZE]; /* shadow indirection table */
+ u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]; /* rss hash key */
+};
+
+struct ppe_common_cb {
+ struct device *dev;
+ struct dsaf_device *dsaf_dev;
+ u8 __iomem *io_base;
+
+ enum ppe_common_mode ppe_mode;
+
+ u8 comm_index; /*ppe_common index*/
+
+ u32 ppe_num;
+ struct hns_ppe_cb ppe_cb[];
+
+};
+
+int hns_ppe_wait_tx_fifo_clean(struct hns_ppe_cb *ppe_cb);
+int hns_ppe_init(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_uninit(struct dsaf_device *dsaf_dev);
+
+void hns_ppe_reset_common(struct dsaf_device *dsaf_dev, u8 ppe_common_index);
+
+void hns_ppe_update_stats(struct hns_ppe_cb *ppe_cb);
+
+int hns_ppe_get_sset_count(int stringset);
+int hns_ppe_get_regs_count(void);
+void hns_ppe_get_regs(struct hns_ppe_cb *ppe_cb, void *data);
+
+void hns_ppe_get_strings(struct hns_ppe_cb *ppe_cb, int stringset, u8 *data);
+void hns_ppe_get_stats(struct hns_ppe_cb *ppe_cb, u64 *data);
+void hns_ppe_set_tso_enable(struct hns_ppe_cb *ppe_cb, u32 value);
+void hns_ppe_set_rss_key(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_key[HNS_PPEV2_RSS_KEY_NUM]);
+void hns_ppe_set_indir_table(struct hns_ppe_cb *ppe_cb,
+ const u32 rss_tab[HNS_PPEV2_RSS_IND_TBL_SIZE]);
+#endif /* _HNS_DSAF_PPE_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
new file mode 100644
index 000000000..b6c8910cf
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.c
@@ -0,0 +1,1119 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/cdev.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/cacheflush.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_ppe.h"
+#include "hns_dsaf_rcb.h"
+
+#define RCB_COMMON_REG_OFFSET 0x80000
+#define TX_RING 0
+#define RX_RING 1
+
+#define RCB_RESET_WAIT_TIMES 30
+#define RCB_RESET_TRY_TIMES 10
+
+/* Because the default MTU is 1500, an RCB buffer size of 2048 is sufficient */
+#define RCB_DEFAULT_BUFFER_SIZE 2048
+
+/**
+ *hns_rcb_wait_fbd_clean - wait until the queues' FBDs are drained
+ *@qs: ring struct pointer array
+ *@q_num: number of queues in the array
+ *@flag: tx or rx flag
+ */
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag)
+{
+ int i, wait_cnt;
+ u32 fbd_num;
+
+ for (wait_cnt = i = 0; i < q_num; wait_cnt++) {
+ usleep_range(200, 300);
+ fbd_num = 0;
+ if (flag & RCB_INT_FLAG_TX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_TX_RING_FBDNUM_REG);
+ if (flag & RCB_INT_FLAG_RX)
+ fbd_num += dsaf_read_dev(qs[i],
+ RCB_RING_RX_RING_FBDNUM_REG);
+ if (!fbd_num)
+ i++;
+ if (wait_cnt >= 10000)
+ break;
+ }
+
+ if (i < q_num)
+ dev_err(qs[i]->handle->owner_dev,
+ "queue(%d) wait fbd(%d) clean fail!!\n", i, fbd_num);
+}
+
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs)
+{
+ u32 head, tail;
+ int wait_cnt;
+
+ tail = dsaf_read_dev(&qs->tx_ring, RCB_REG_TAIL);
+ wait_cnt = 0;
+ while (wait_cnt++ < HNS_MAX_WAIT_CNT) {
+ head = dsaf_read_dev(&qs->tx_ring, RCB_REG_HEAD);
+ if (tail == head)
+ break;
+
+ usleep_range(100, 200);
+ }
+
+ if (wait_cnt >= HNS_MAX_WAIT_CNT) {
+ dev_err(qs->dev->dev, "rcb wait timeout, head not equal to tail.\n");
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+/**
+ *hns_rcb_reset_ring_hw - ring reset
+ *@q: ring struct pointer
+ */
+void hns_rcb_reset_ring_hw(struct hnae_queue *q)
+{
+ u32 wait_cnt;
+ u32 try_cnt = 0;
+ u32 could_ret;
+
+ u32 tx_fbd_num;
+
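+	/* wait for the TX FBDs to drain, then disable prefetch, assert
+	 * T0_BE_RST and poll COULD_BE_RST, re-toggling the reset and retrying
+	 * the whole sequence up to RCB_RESET_TRY_TIMES times
+	 */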
+ while (try_cnt++ < RCB_RESET_TRY_TIMES) {
+ usleep_range(100, 200);
+ tx_fbd_num = dsaf_read_dev(q, RCB_RING_TX_RING_FBDNUM_REG);
+ if (tx_fbd_num)
+ continue;
+
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt = 0;
+ while (!could_ret && (wait_cnt < RCB_RESET_WAIT_TIMES)) {
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 1);
+
+ msleep(20);
+ could_ret = dsaf_read_dev(q, RCB_RING_COULD_BE_RST);
+
+ wait_cnt++;
+ }
+
+ dsaf_write_dev(q, RCB_RING_T0_BE_RST, 0);
+
+ if (could_ret)
+ break;
+ }
+
+ if (try_cnt >= RCB_RESET_TRY_TIMES)
+ dev_err(q->dev->dev, "port%d reset ring fail\n",
+ hns_ae_get_vf_cb(q->handle)->port_index);
+}
+
+/**
+ *hns_rcb_int_ctrl_hw - rcb irq enable control
+ *@q: hnae queue struct pointer
+ *@flag:ring flag tx or rx
+ *@mask:mask
+ */
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+ u32 int_mask_en = !!mask;
+
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_TX_OVERTIME_REG,
+ int_mask_en);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+ dsaf_write_dev(q, RCB_RING_INTMSK_RX_OVERTIME_REG,
+ int_mask_en);
+ }
+}
+
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+ if (flag & RCB_INT_FLAG_TX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_RING_REG, 1);
+ dsaf_write_dev(q, RCB_RING_INTSTS_TX_OVERTIME_REG, 1);
+ }
+
+ if (flag & RCB_INT_FLAG_RX) {
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_RING_REG, 1);
+ dsaf_write_dev(q, RCB_RING_INTSTS_RX_OVERTIME_REG, 1);
+ }
+}
+
+void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask)
+{
+ u32 int_mask_en = !!mask;
+
+ if (flag & RCB_INT_FLAG_TX)
+ dsaf_write_dev(q, RCB_RING_INTMSK_TXWL_REG, int_mask_en);
+
+ if (flag & RCB_INT_FLAG_RX)
+ dsaf_write_dev(q, RCB_RING_INTMSK_RXWL_REG, int_mask_en);
+}
+
+void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag)
+{
+ if (flag & RCB_INT_FLAG_TX)
+ dsaf_write_dev(q, RCBV2_TX_RING_INT_STS_REG, 1);
+
+ if (flag & RCB_INT_FLAG_RX)
+ dsaf_write_dev(q, RCBV2_RX_RING_INT_STS_REG, 1);
+}
+
+/**
+ *hns_rcb_ring_enable_hw - enable ring
+ *@q: rcb ring
+ *@val: value to write
+ */
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val)
+{
+ dsaf_write_dev(q, RCB_RING_PREFETCH_EN_REG, !!val);
+}
+
+void hns_rcb_start(struct hnae_queue *q, u32 val)
+{
+ hns_rcb_ring_enable_hw(q, val);
+}
+
+/**
+ *hns_rcb_common_init_commit_hw - mark rcb common init as completed
+ *@rcb_common: rcb common device
+ */
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common)
+{
+ wmb(); /* Sync point before breakpoint */
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_SYS_FSH_REG, 1);
+ wmb(); /* Sync point after breakpoint */
+}
+
+/* hns_rcb_set_tx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
+/* hns_rcb_set_rx_ring_bs - init rcb ring buf size register
+ *@q: hnae_queue
+ *@buf_size: buffer size set to hw
+ */
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size)
+{
+ u32 bd_size_type = hns_rcb_buf_size2type(buf_size);
+
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_LEN_REG,
+ bd_size_type);
+}
+
+/**
+ *hns_rcb_ring_init - init rcb ring
+ *@ring_pair: ring pair control block
+ *@ring_type: ring type, RX_RING or TX_RING
+ */
+static void hns_rcb_ring_init(struct ring_pair_cb *ring_pair, int ring_type)
+{
+ struct hnae_queue *q = &ring_pair->q;
+ struct hnae_ring *ring =
+ (ring_type == RX_RING) ? &q->rx_ring : &q->tx_ring;
+ dma_addr_t dma = ring->desc_dma_addr;
+
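+	/* the high address word is written as (dma >> 31) >> 1 rather than
+	 * dma >> 32 so the shift stays well-defined when dma_addr_t is 32 bits
+	 */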
+ if (ring_type == RX_RING) {
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_RX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns_rcb_set_rx_ring_bs(q, ring->buf_size);
+
+ dsaf_write_dev(q, RCB_RING_RX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_comm);
+ dsaf_write_dev(q, RCB_RING_RX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_comm);
+ } else {
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ dsaf_write_dev(q, RCB_RING_TX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns_rcb_set_tx_ring_bs(q, ring->buf_size);
+
+ dsaf_write_dev(q, RCB_RING_TX_RING_BD_NUM_REG,
+ ring_pair->port_id_in_comm);
+ dsaf_write_dev(q, RCB_RING_TX_RING_PKTLINE_REG,
+ ring_pair->port_id_in_comm + HNS_RCB_TX_PKTLINE_OFFSET);
+ }
+}
+
+/**
+ *hns_rcb_init_hw - init rcb hardware
+ *@ring: rcb ring
+ */
+void hns_rcb_init_hw(struct ring_pair_cb *ring)
+{
+ hns_rcb_ring_init(ring, RX_RING);
+ hns_rcb_ring_init(ring, TX_RING);
+}
+
+/**
+ *hns_rcb_set_port_desc_cnt - set rcb port descriptor number
+ *@rcb_common: rcb_common device
+ *@port_idx: port index
+ *@desc_cnt: BD number
+ */
+static void hns_rcb_set_port_desc_cnt(struct rcb_common_cb *rcb_common,
+ u32 port_idx, u32 desc_cnt)
+{
+ dsaf_write_dev(rcb_common, RCB_CFG_BD_NUM_REG + port_idx * 4,
+ desc_cnt);
+}
+
+static void hns_rcb_set_port_timeout(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
+{
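+	/* V1 programs a single overtime register scaled by the RCB clock;
+	 * V2 service ports also set a per-port interrupt gap time, capped at
+	 * HNS_RCB_DEF_GAP_TIME_USECS
+	 */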
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ dsaf_write_dev(rcb_common, RCB_CFG_OVERTIME_REG,
+ timeout * HNS_RCB_CLK_FREQ_MHZ);
+ } else if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+ if (timeout > HNS_RCB_DEF_GAP_TIME_USECS)
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ HNS_RCB_DEF_GAP_TIME_USECS);
+ else
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_INT_GAPTIME_REG + port_idx * 4,
+ timeout);
+
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
+ } else {
+ dsaf_write_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4,
+ timeout);
+ }
+}
+
+static int hns_rcb_common_get_port_num(struct rcb_common_cb *rcb_common)
+{
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ return HNS_RCB_SERVICE_NW_ENGINE_NUM;
+ else
+ return HNS_RCB_DEBUG_NW_ENGINE_NUM;
+}
+
+/* clear and mask the rcb common exception irqs */
+static void hns_rcb_comm_exc_irq_en(
+	struct rcb_common_cb *rcb_common, int en)
+{
+	u32 clr_value = 0xfffffffful;
+	u32 msk_value = en ? 0 : 0xfffffffful;
+
+	/* clear int */
+	dsaf_write_dev(rcb_common, RCB_COM_INTSTS_ECC_ERR_REG, clr_value);
+
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_RING_STS, clr_value);
+
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_BD_RINT_STS, clr_value);
+
+	dsaf_write_dev(rcb_common, RCB_COM_RINT_TX_PKT_REG, clr_value);
+	dsaf_write_dev(rcb_common, RCB_COM_AXI_ERR_STS, clr_value);
+
+	/* enable mask */
+	dsaf_write_dev(rcb_common, RCB_COM_INTMASK_ECC_ERR_REG, msk_value);
+
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_RING, msk_value);
+
+	/* tx bd does not need a cacheline, so mask sf_txring_fbd_intmask (bit 1) */
+	dsaf_write_dev(rcb_common, RCB_COM_SF_CFG_INTMASK_BD, msk_value | 2);
+
+	dsaf_write_dev(rcb_common, RCB_COM_INTMSK_TX_PKT_REG, msk_value);
+	dsaf_write_dev(rcb_common, RCB_COM_AXI_WR_ERR_INTMASK, msk_value);
+}
+
+/**
+ *hns_rcb_common_init_hw - init rcb common hardware
+ *@rcb_common: rcb_common device
+ *return 0 on success, negative on failure
+ */
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common)
+{
+ u32 reg_val;
+ int i;
+ int port_num = hns_rcb_common_get_port_num(rcb_common);
+
+ hns_rcb_comm_exc_irq_en(rcb_common, 0);
+
+ reg_val = dsaf_read_dev(rcb_common, RCB_COM_CFG_INIT_FLAG_REG);
+	if ((reg_val & 0x1) != 0x1) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "RCB_COM_CFG_INIT_FLAG_REG reg = 0x%x\n", reg_val);
+ return -EBUSY;
+ }
+
+ for (i = 0; i < port_num; i++) {
+ hns_rcb_set_port_desc_cnt(rcb_common, i, rcb_common->desc_num);
+ hns_rcb_set_rx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_RX_COALESCED_FRAMES);
+ if (!AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver) &&
+ !HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ hns_rcb_set_tx_coalesced_frames(
+ rcb_common, i, HNS_RCB_DEF_TX_COALESCED_FRAMES);
+ hns_rcb_set_port_timeout(
+ rcb_common, i, HNS_RCB_DEF_COALESCED_USECS);
+ }
+
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_ENDIAN_REG,
+ HNS_RCB_COMMON_ENDIAN);
+
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_FNA_REG, 0x0);
+ dsaf_write_dev(rcb_common, RCB_COM_CFG_FA_REG, 0x1);
+ } else {
+ dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
+ RCB_COM_CFG_FNA_B, false);
+ dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_USER_REG,
+ RCB_COM_CFG_FA_B, true);
+ dsaf_set_dev_bit(rcb_common, RCBV2_COM_CFG_TSO_MODE_REG,
+ RCB_COM_TSO_MODE_B, HNS_TSO_MODE_8BD_32K);
+ }
+
+ return 0;
+}
+
+int hns_rcb_buf_size2type(u32 buf_size)
+{
+ int bd_size_type;
+
+ switch (buf_size) {
+ case 512:
+ bd_size_type = HNS_BD_SIZE_512_TYPE;
+ break;
+ case 1024:
+ bd_size_type = HNS_BD_SIZE_1024_TYPE;
+ break;
+ case 2048:
+ bd_size_type = HNS_BD_SIZE_2048_TYPE;
+ break;
+ case 4096:
+ bd_size_type = HNS_BD_SIZE_4096_TYPE;
+ break;
+ default:
+ bd_size_type = -EINVAL;
+ }
+
+ return bd_size_type;
+}
+
+static void hns_rcb_ring_get_cfg(struct hnae_queue *q, int ring_type)
+{
+ struct hnae_ring *ring;
+ struct rcb_common_cb *rcb_common;
+ struct ring_pair_cb *ring_pair_cb;
+ u16 desc_num, mdnum_ppkt;
+ bool irq_idx, is_ver1;
+
+ ring_pair_cb = container_of(q, struct ring_pair_cb, q);
+ is_ver1 = AE_IS_VER1(ring_pair_cb->rcb_common->dsaf_dev->dsaf_ver);
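+ /* rx ring registers sit at the ring-pair base; tx ring registers
+ * start HNS_RCB_TX_REG_OFFSET above it
+ */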
+ if (ring_type == RX_RING) {
+ ring = &q->rx_ring;
+ ring->io_base = ring_pair_cb->q.io_base;
+ irq_idx = HNS_RCB_IRQ_IDX_RX;
+ mdnum_ppkt = HNS_RCB_RING_MAX_BD_PER_PKT;
+ } else {
+ ring = &q->tx_ring;
+ ring->io_base = ring_pair_cb->q.io_base +
+ HNS_RCB_TX_REG_OFFSET;
+ irq_idx = HNS_RCB_IRQ_IDX_TX;
+ mdnum_ppkt = is_ver1 ? HNS_RCB_RING_MAX_TXBD_PER_PKT :
+ HNS_RCBV2_RING_MAX_TXBD_PER_PKT;
+ }
+
+ rcb_common = ring_pair_cb->rcb_common;
+ desc_num = rcb_common->dsaf_dev->desc_num;
+
+ ring->desc = NULL;
+ ring->desc_cb = NULL;
+
+ ring->irq = ring_pair_cb->virq[irq_idx];
+ ring->desc_dma_addr = 0;
+
+ ring->buf_size = RCB_DEFAULT_BUFFER_SIZE;
+ ring->desc_num = desc_num;
+ ring->max_desc_num_per_pkt = mdnum_ppkt;
+ ring->max_raw_data_sz_per_desc = HNS_RCB_MAX_PKT_SIZE;
+ ring->max_pkt_size = HNS_RCB_MAX_PKT_SIZE;
+ ring->next_to_use = 0;
+ ring->next_to_clean = 0;
+}
+
+static void hns_rcb_ring_pair_get_cfg(struct ring_pair_cb *ring_pair_cb)
+{
+ ring_pair_cb->q.handle = NULL;
+
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, RX_RING);
+ hns_rcb_ring_get_cfg(&ring_pair_cb->q, TX_RING);
+}
+
+static int hns_rcb_get_port_in_comm(
+ struct rcb_common_cb *rcb_common, int ring_idx)
+{
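+ /* ring pairs are assigned to ports in contiguous blocks of
+ * max_vfn * max_q_per_vf indexes
+ */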
+ return ring_idx / (rcb_common->max_q_per_vf * rcb_common->max_vfn);
+}
+
+#define SERVICE_RING_IRQ_IDX(v1) \
+ ((v1) ? HNS_SERVICE_RING_IRQ_IDX : HNSV2_SERVICE_RING_IRQ_IDX)
+static int hns_rcb_get_base_irq_idx(struct rcb_common_cb *rcb_common)
+{
+ bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
+
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev))
+ return SERVICE_RING_IRQ_IDX(is_ver1);
+ else
+ return HNS_DEBUG_RING_IRQ_IDX;
+}
+
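+/* each ring pair occupies HNS_RCB_REG_OFFSET (0x10000) of register space,
+ * starting one such block above the common register base
+ */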
+#define RCB_COMM_BASE_TO_RING_BASE(base, ringid)\
+ ((base) + 0x10000 + HNS_RCB_REG_OFFSET * (ringid))
+/**
+ *hns_rcb_get_cfg - get rcb config
+ *@rcb_common: rcb common device
+ */
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common)
+{
+ struct ring_pair_cb *ring_pair_cb;
+ u32 i;
+ u32 ring_num = rcb_common->ring_num;
+ int base_irq_idx = hns_rcb_get_base_irq_idx(rcb_common);
+ struct platform_device *pdev =
+ to_platform_device(rcb_common->dsaf_dev->dev);
+ bool is_ver1 = AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver);
+
+ for (i = 0; i < ring_num; i++) {
+ ring_pair_cb = &rcb_common->ring_pair_cb[i];
+ ring_pair_cb->rcb_common = rcb_common;
+ ring_pair_cb->dev = rcb_common->dsaf_dev->dev;
+ ring_pair_cb->index = i;
+ ring_pair_cb->q.io_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->io_base, i);
+ ring_pair_cb->port_id_in_comm =
+ hns_rcb_get_port_in_comm(rcb_common, i);
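+ /* v1 hardware uses a stride of two IRQs per ring pair (tx first,
+ * then rx); v2 uses a stride of three, with rx at offset 0 and tx
+ * at offset 1
+ */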
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] =
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2) :
+ platform_get_irq(pdev, base_irq_idx + i * 3 + 1);
+ ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] =
+ is_ver1 ? platform_get_irq(pdev, base_irq_idx + i * 2 + 1) :
+ platform_get_irq(pdev, base_irq_idx + i * 3);
+ if ((ring_pair_cb->virq[HNS_RCB_IRQ_IDX_TX] == -EPROBE_DEFER) ||
+ (ring_pair_cb->virq[HNS_RCB_IRQ_IDX_RX] == -EPROBE_DEFER))
+ return -EPROBE_DEFER;
+
+ ring_pair_cb->q.phy_base =
+ RCB_COMM_BASE_TO_RING_BASE(rcb_common->phy_base, i);
+ hns_rcb_ring_pair_get_cfg(ring_pair_cb);
+ }
+
+ return 0;
+}
+
+/**
+ *hns_rcb_get_rx_coalesced_frames - get rcb port rx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
+ */
+u32 hns_rcb_get_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ return dsaf_read_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4);
+}
+
+/**
+ *hns_rcb_get_tx_coalesced_frames - get rcb port tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: coalesced_frames
+ */
+u32 hns_rcb_get_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ u64 reg;
+
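+ /* tx pktline entries follow the per-port rx entries in the same
+ * register array, hence the HNS_RCB_TX_PKTLINE_OFFSET on the index
+ */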
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ return dsaf_read_dev(rcb_common, reg);
+}
+
+/**
+ *hns_rcb_get_coalesce_usecs - get rcb port coalesced time_out
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *
+ *Returns: time_out
+ */
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx)
+{
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver))
+ return dsaf_read_dev(rcb_common, RCB_CFG_OVERTIME_REG) /
+ HNS_RCB_CLK_FREQ_MHZ;
+ else
+ return dsaf_read_dev(rcb_common,
+ RCB_PORT_CFG_OVERTIME_REG + port_idx * 4);
+}
+
+/**
+ *hns_rcb_set_coalesce_usecs - set rcb port coalesced time_out
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@timeout:tx/rx time for coalesced time_out
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout)
+{
+ u32 old_timeout = hns_rcb_get_coalesce_usecs(rcb_common, port_idx);
+
+ if (timeout == old_timeout)
+ return 0;
+
+ if (AE_IS_VER1(rcb_common->dsaf_dev->dsaf_ver)) {
+ if (!HNS_DSAF_IS_DEBUG(rcb_common->dsaf_dev)) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: coalesce_usecs setting is not supported!\n");
+ return -EINVAL;
+ }
+ }
+ if (timeout > HNS_RCB_MAX_COALESCED_USECS || timeout == 0) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: coalesce_usecs must be in the range 1~1023us\n");
+ return -EINVAL;
+ }
+ hns_rcb_set_port_timeout(rcb_common, port_idx, timeout);
+ return 0;
+}
+
+/**
+ *hns_rcb_set_tx_coalesced_frames - set rcb tx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@coalesced_frames:tx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+ u32 old_waterline =
+ hns_rcb_get_tx_coalesced_frames(rcb_common, port_idx);
+ u64 reg;
+
+ if (coalesced_frames == old_waterline)
+ return 0;
+
+ if (coalesced_frames != 1) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: setting tx coalesce_frames is not supported!\n");
+ return -EINVAL;
+ }
+
+ reg = RCB_CFG_PKTLINE_REG + (port_idx + HNS_RCB_TX_PKTLINE_OFFSET) * 4;
+ dsaf_write_dev(rcb_common, reg, coalesced_frames);
+ return 0;
+}
+
+/**
+ *hns_rcb_set_rx_coalesced_frames - set rcb rx coalesced frames
+ *@rcb_common: rcb_common device
+ *@port_idx:port id in comm
+ *@coalesced_frames:rx BD num for coalesced frames
+ *
+ * Returns:
+ * Zero for success, or an error code in case of failure
+ */
+int hns_rcb_set_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames)
+{
+ u32 old_waterline =
+ hns_rcb_get_rx_coalesced_frames(rcb_common, port_idx);
+
+ if (coalesced_frames == old_waterline)
+ return 0;
+
+ if (coalesced_frames >= rcb_common->desc_num ||
+ coalesced_frames > HNS_RCB_MAX_COALESCED_FRAMES ||
+ coalesced_frames < HNS_RCB_MIN_COALESCED_FRAMES) {
+ dev_err(rcb_common->dsaf_dev->dev,
+ "error: coalesce_frames value is out of the supported range!\n");
+ return -EINVAL;
+ }
+
+ dsaf_write_dev(rcb_common, RCB_CFG_PKTLINE_REG + port_idx * 4,
+ coalesced_frames);
+ return 0;
+}
+
+/**
+ *hns_rcb_get_queue_mode - get max VM number and max ring number per VM
+ * according to dsaf mode
+ *@dsaf_mode: dsaf mode
+ *@max_vfn : max vfn number
+ *@max_q_per_vf:max ring number per vm
+ */
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode, u16 *max_vfn,
+ u16 *max_q_per_vf)
+{
+ switch (dsaf_mode) {
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ case DSAF_MODE_DISABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
+ *max_vfn = 1;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ *max_vfn = 64;
+ *max_q_per_vf = 1;
+ break;
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ *max_vfn = 16;
+ *max_q_per_vf = 1;
+ break;
+ default:
+ *max_vfn = 1;
+ *max_q_per_vf = 16;
+ break;
+ }
+}
+
+static int hns_rcb_get_ring_num(struct dsaf_device *dsaf_dev)
+{
+ switch (dsaf_dev->dsaf_mode) {
+ case DSAF_MODE_ENABLE_FIX:
+ case DSAF_MODE_DISABLE_SP:
+ return 1;
+
+ case DSAF_MODE_DISABLE_FIX:
+ return 6;
+
+ case DSAF_MODE_ENABLE_0VM:
+ return 32;
+
+ case DSAF_MODE_DISABLE_6PORT_0VM:
+ case DSAF_MODE_ENABLE_16VM:
+ case DSAF_MODE_DISABLE_6PORT_2VM:
+ case DSAF_MODE_DISABLE_6PORT_16VM:
+ case DSAF_MODE_DISABLE_6PORT_4VM:
+ case DSAF_MODE_ENABLE_8VM:
+ return 96;
+
+ case DSAF_MODE_DISABLE_2PORT_16VM:
+ case DSAF_MODE_DISABLE_2PORT_8VM:
+ case DSAF_MODE_ENABLE_32VM:
+ case DSAF_MODE_DISABLE_2PORT_64VM:
+ case DSAF_MODE_ENABLE_128VM:
+ return 128;
+
+ default:
+ dev_warn(dsaf_dev->dev,
+ "get ring num failed, using default! dsaf_mode=%d\n",
+ dsaf_dev->dsaf_mode);
+ return 128;
+ }
+}
+
+static u8 __iomem *hns_rcb_common_get_vaddr(struct rcb_common_cb *rcb_common)
+{
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_base + RCB_COMMON_REG_OFFSET;
+}
+
+static phys_addr_t hns_rcb_common_get_paddr(struct rcb_common_cb *rcb_common)
+{
+ struct dsaf_device *dsaf_dev = rcb_common->dsaf_dev;
+
+ return dsaf_dev->ppe_paddr + RCB_COMMON_REG_OFFSET;
+}
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev,
+ int comm_index)
+{
+ struct rcb_common_cb *rcb_common;
+ enum dsaf_mode dsaf_mode = dsaf_dev->dsaf_mode;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+ int ring_num = hns_rcb_get_ring_num(dsaf_dev);
+
+ rcb_common =
+ devm_kzalloc(dsaf_dev->dev,
+ struct_size(rcb_common, ring_pair_cb, ring_num),
+ GFP_KERNEL);
+ if (!rcb_common) {
+ dev_err(dsaf_dev->dev, "rcb common devm_kzalloc failed!\n");
+ return -ENOMEM;
+ }
+ rcb_common->comm_index = comm_index;
+ rcb_common->ring_num = ring_num;
+ rcb_common->dsaf_dev = dsaf_dev;
+
+ rcb_common->desc_num = dsaf_dev->desc_num;
+
+ hns_rcb_get_queue_mode(dsaf_mode, &max_vfn, &max_q_per_vf);
+ rcb_common->max_vfn = max_vfn;
+ rcb_common->max_q_per_vf = max_q_per_vf;
+
+ rcb_common->io_base = hns_rcb_common_get_vaddr(rcb_common);
+ rcb_common->phy_base = hns_rcb_common_get_paddr(rcb_common);
+
+ dsaf_dev->rcb_common[comm_index] = rcb_common;
+ return 0;
+}
+
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev,
+ u32 comm_index)
+{
+ dsaf_dev->rcb_common[comm_index] = NULL;
+}
+
+void hns_rcb_update_stats(struct hnae_queue *queue)
+{
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct dsaf_device *dsaf_dev = ring->rcb_common->dsaf_dev;
+ struct ppe_common_cb *ppe_common
+ = dsaf_dev->ppe_common[ring->rcb_common->comm_index];
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
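+ /* the pktnum record registers appear to clear on a write of 1, so
+ * each hardware reading is accumulated into the software totals
+ */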
+ hw_stats->rx_pkts += dsaf_read_dev(queue,
+ RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_rx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_rx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG + 4 * ring->index);
+
+ hw_stats->tx_pkts += dsaf_read_dev(queue,
+ RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+ dsaf_write_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG, 0x1);
+
+ hw_stats->ppe_tx_ok_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG + 4 * ring->index);
+ hw_stats->ppe_tx_drop_pkts += dsaf_read_dev(ppe_common,
+ PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG + 4 * ring->index);
+}
+
+/**
+ *hns_rcb_get_stats - get rcb statistic
+ *@queue: rcb ring
+ *@data:statistic value
+ */
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data)
+{
+ u64 *regs_buff = data;
+ struct ring_pair_cb *ring =
+ container_of(queue, struct ring_pair_cb, q);
+ struct hns_ring_hw_stats *hw_stats = &ring->hw_stats;
+
+ regs_buff[0] = hw_stats->tx_pkts;
+ regs_buff[1] = hw_stats->ppe_tx_ok_pkts;
+ regs_buff[2] = hw_stats->ppe_tx_drop_pkts;
+ regs_buff[3] =
+ dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+
+ regs_buff[4] = queue->tx_ring.stats.tx_pkts;
+ regs_buff[5] = queue->tx_ring.stats.tx_bytes;
+ regs_buff[6] = queue->tx_ring.stats.tx_err_cnt;
+ regs_buff[7] = queue->tx_ring.stats.io_err_cnt;
+ regs_buff[8] = queue->tx_ring.stats.sw_err_cnt;
+ regs_buff[9] = queue->tx_ring.stats.seg_pkt_cnt;
+ regs_buff[10] = queue->tx_ring.stats.restart_queue;
+ regs_buff[11] = queue->tx_ring.stats.tx_busy;
+
+ regs_buff[12] = hw_stats->rx_pkts;
+ regs_buff[13] = hw_stats->ppe_rx_ok_pkts;
+ regs_buff[14] = hw_stats->ppe_rx_drop_pkts;
+ regs_buff[15] =
+ dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+
+ regs_buff[16] = queue->rx_ring.stats.rx_pkts;
+ regs_buff[17] = queue->rx_ring.stats.rx_bytes;
+ regs_buff[18] = queue->rx_ring.stats.rx_err_cnt;
+ regs_buff[19] = queue->rx_ring.stats.io_err_cnt;
+ regs_buff[20] = queue->rx_ring.stats.sw_err_cnt;
+ regs_buff[21] = queue->rx_ring.stats.seg_pkt_cnt;
+ regs_buff[22] = queue->rx_ring.stats.reuse_pg_cnt;
+ regs_buff[23] = queue->rx_ring.stats.err_pkt_len;
+ regs_buff[24] = queue->rx_ring.stats.non_vld_descs;
+ regs_buff[25] = queue->rx_ring.stats.err_bd_num;
+ regs_buff[26] = queue->rx_ring.stats.l2_err;
+ regs_buff[27] = queue->rx_ring.stats.l3l4_csum_err;
+}
+
+/**
+ *hns_rcb_get_ring_sset_count - rcb string set count
+ *@stringset:ethtool cmd
+ *return rcb ring string set count
+ */
+int hns_rcb_get_ring_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS)
+ return HNS_RING_STATIC_REG_NUM;
+
+ return 0;
+}
+
+/**
+ *hns_rcb_get_common_regs_count - rcb common regs count
+ *return regs count
+ */
+int hns_rcb_get_common_regs_count(void)
+{
+ return HNS_RCB_COMMON_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_ring_regs_count - rcb ring regs count
+ *return regs count
+ */
+int hns_rcb_get_ring_regs_count(void)
+{
+ return HNS_RCB_RING_DUMP_REG_NUM;
+}
+
+/**
+ *hns_rcb_get_strings - get rcb string set
+ *@stringset:string set index
+ *@data:strings name value
+ *@index:queue index
+ */
+void hns_rcb_get_strings(int stringset, u8 *data, int index)
+{
+ char *buff = (char *)data;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_rcb_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_tx_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_ppe_drop_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_fbd_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_bytes", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_err_cnt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_io_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_sw_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_seg_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_restart_queue", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_ring%d_tx_busy", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_rcb_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_ppe_drop_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_fbd_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_pkt_num", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bytes", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_err_cnt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_io_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_sw_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_seg_pkt", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_reuse_pg", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_len_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_non_vld_desc_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_bd_num_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l2_err", index);
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_ring%d_l3l4csum_err", index);
+}
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_com, void *data)
+{
+ u32 *regs = data;
+ bool is_ver1 = AE_IS_VER1(rcb_com->dsaf_dev->dsaf_ver);
+ bool is_dbg = HNS_DSAF_IS_DEBUG(rcb_com->dsaf_dev);
+ u32 reg_tmp;
+ u32 reg_num_tmp;
+ u32 i = 0;
+
+ /*rcb common registers */
+ regs[0] = dsaf_read_dev(rcb_com, RCB_COM_CFG_ENDIAN_REG);
+ regs[1] = dsaf_read_dev(rcb_com, RCB_COM_CFG_SYS_FSH_REG);
+ regs[2] = dsaf_read_dev(rcb_com, RCB_COM_CFG_INIT_FLAG_REG);
+
+ regs[3] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_REG);
+ regs[4] = dsaf_read_dev(rcb_com, RCB_COM_CFG_RINVLD_REG);
+ regs[5] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FNA_REG);
+ regs[6] = dsaf_read_dev(rcb_com, RCB_COM_CFG_FA_REG);
+ regs[7] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PKT_TC_BP_REG);
+ regs[8] = dsaf_read_dev(rcb_com, RCB_COM_CFG_PPE_TNL_CLKEN_REG);
+
+ regs[9] = dsaf_read_dev(rcb_com, RCB_COM_INTMSK_TX_PKT_REG);
+ regs[10] = dsaf_read_dev(rcb_com, RCB_COM_RINT_TX_PKT_REG);
+ regs[11] = dsaf_read_dev(rcb_com, RCB_COM_INTMASK_ECC_ERR_REG);
+ regs[12] = dsaf_read_dev(rcb_com, RCB_COM_INTSTS_ECC_ERR_REG);
+ regs[13] = dsaf_read_dev(rcb_com, RCB_COM_EBD_SRAM_ERR_REG);
+ regs[14] = dsaf_read_dev(rcb_com, RCB_COM_RXRING_ERR_REG);
+ regs[15] = dsaf_read_dev(rcb_com, RCB_COM_TXRING_ERR_REG);
+ regs[16] = dsaf_read_dev(rcb_com, RCB_COM_TX_FBD_ERR_REG);
+ regs[17] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK_EN_REG);
+ regs[18] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK0_REG);
+ regs[19] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK1_REG);
+ regs[20] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK2_REG);
+ regs[21] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK3_REG);
+ regs[22] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK4_REG);
+ regs[23] = dsaf_read_dev(rcb_com, RCB_SRAM_ECC_CHK5_REG);
+ regs[24] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR0_REG);
+ regs[25] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR3_REG);
+ regs[26] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR4_REG);
+ regs[27] = dsaf_read_dev(rcb_com, RCB_ECC_ERR_ADDR5_REG);
+
+ regs[28] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_RING);
+ regs[29] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING_STS);
+ regs[30] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_RING);
+ regs[31] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_INTMASK_BD);
+ regs[32] = dsaf_read_dev(rcb_com, RCB_COM_SF_CFG_BD_RINT_STS);
+ regs[33] = dsaf_read_dev(rcb_com, RCB_COM_RCB_RD_BD_BUSY);
+ regs[34] = dsaf_read_dev(rcb_com, RCB_COM_RCB_FBD_CRT_EN);
+ regs[35] = dsaf_read_dev(rcb_com, RCB_COM_AXI_WR_ERR_INTMASK);
+ regs[36] = dsaf_read_dev(rcb_com, RCB_COM_AXI_ERR_STS);
+ regs[37] = dsaf_read_dev(rcb_com, RCB_COM_CHK_TX_FBD_NUM_REG);
+
+ /* rcb common entry registers */
+ for (i = 0; i < 16; i++) { /* total 16 model registers */
+ regs[38 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_BD_NUM_REG + 4 * i);
+ regs[54 + i]
+ = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_REG + 4 * i);
+ }
+
+ reg_tmp = is_ver1 ? RCB_CFG_OVERTIME_REG : RCB_PORT_CFG_OVERTIME_REG;
+ reg_num_tmp = (is_ver1 || is_dbg) ? 1 : 6;
+ for (i = 0; i < reg_num_tmp; i++)
+ regs[70 + i] = dsaf_read_dev(rcb_com, reg_tmp);
+
+ regs[76] = dsaf_read_dev(rcb_com, RCB_CFG_PKTLINE_INT_NUM_REG);
+ regs[77] = dsaf_read_dev(rcb_com, RCB_CFG_OVERTIME_INT_NUM_REG);
+
+ /* mark end of rcb common regs */
+ for (i = 78; i < 80; i++)
+ regs[i] = 0xcccccccc;
+}
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data)
+{
+ u32 *regs = data;
+ struct ring_pair_cb *ring_pair
+ = container_of(queue, struct ring_pair_cb, q);
+ u32 i = 0;
+
+ /*rcb ring registers */
+ regs[0] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_L_REG);
+ regs[1] = dsaf_read_dev(queue, RCB_RING_RX_RING_BASEADDR_H_REG);
+ regs[2] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_NUM_REG);
+ regs[3] = dsaf_read_dev(queue, RCB_RING_RX_RING_BD_LEN_REG);
+ regs[4] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTLINE_REG);
+ regs[5] = dsaf_read_dev(queue, RCB_RING_RX_RING_TAIL_REG);
+ regs[6] = dsaf_read_dev(queue, RCB_RING_RX_RING_HEAD_REG);
+ regs[7] = dsaf_read_dev(queue, RCB_RING_RX_RING_FBDNUM_REG);
+ regs[8] = dsaf_read_dev(queue, RCB_RING_RX_RING_PKTNUM_RECORD_REG);
+
+ regs[9] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_L_REG);
+ regs[10] = dsaf_read_dev(queue, RCB_RING_TX_RING_BASEADDR_H_REG);
+ regs[11] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_NUM_REG);
+ regs[12] = dsaf_read_dev(queue, RCB_RING_TX_RING_BD_LEN_REG);
+ regs[13] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTLINE_REG);
+ regs[15] = dsaf_read_dev(queue, RCB_RING_TX_RING_TAIL_REG);
+ regs[16] = dsaf_read_dev(queue, RCB_RING_TX_RING_HEAD_REG);
+ regs[17] = dsaf_read_dev(queue, RCB_RING_TX_RING_FBDNUM_REG);
+ regs[18] = dsaf_read_dev(queue, RCB_RING_TX_RING_OFFSET_REG);
+ regs[19] = dsaf_read_dev(queue, RCB_RING_TX_RING_PKTNUM_RECORD_REG);
+
+ regs[20] = dsaf_read_dev(queue, RCB_RING_PREFETCH_EN_REG);
+ regs[21] = dsaf_read_dev(queue, RCB_RING_CFG_VF_NUM_REG);
+ regs[22] = dsaf_read_dev(queue, RCB_RING_ASID_REG);
+ regs[23] = dsaf_read_dev(queue, RCB_RING_RX_VM_REG);
+ regs[24] = dsaf_read_dev(queue, RCB_RING_T0_BE_RST);
+ regs[25] = dsaf_read_dev(queue, RCB_RING_COULD_BE_RST);
+ regs[26] = dsaf_read_dev(queue, RCB_RING_WRR_WEIGHT_REG);
+
+ regs[27] = dsaf_read_dev(queue, RCB_RING_INTMSK_RXWL_REG);
+ regs[28] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_RING_REG);
+ regs[29] = dsaf_read_dev(queue, RCB_RING_INTMSK_TXWL_REG);
+ regs[30] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_RING_REG);
+ regs[31] = dsaf_read_dev(queue, RCB_RING_INTMSK_RX_OVERTIME_REG);
+ regs[32] = dsaf_read_dev(queue, RCB_RING_INTSTS_RX_OVERTIME_REG);
+ regs[33] = dsaf_read_dev(queue, RCB_RING_INTMSK_TX_OVERTIME_REG);
+ regs[34] = dsaf_read_dev(queue, RCB_RING_INTSTS_TX_OVERTIME_REG);
+
+ /* mark end of ring regs */
+ for (i = 35; i < 40; i++)
+ regs[i] = 0xcccccc00 + ring_pair->index;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
new file mode 100644
index 000000000..a9f805925
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_rcb.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_DSAF_RCB_H
+#define _HNS_DSAF_RCB_H
+
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+
+#include "hnae.h"
+#include "hns_dsaf_main.h"
+
+struct rcb_common_cb;
+
+#define HNS_RCB_IRQ_NUM_PER_QUEUE 2
+#define HNS_RCB_IRQ_IDX_TX 0
+#define HNS_RCB_IRQ_IDX_RX 1
+#define HNS_RCB_TX_REG_OFFSET 0x40
+
+#define HNS_RCB_SERVICE_NW_ENGINE_NUM DSAF_COMM_CHN
+#define HNS_RCB_DEBUG_NW_ENGINE_NUM 1
+#define HNS_RCB_RING_MAX_BD_PER_PKT 3
+#define HNS_RCB_RING_MAX_TXBD_PER_PKT 3
+#define HNS_RCBV2_RING_MAX_TXBD_PER_PKT 8
+#define HNS_RCB_MAX_PKT_SIZE MAC_MAX_MTU
+
+#define HNS_RCB_RING_MAX_PENDING_BD 1024
+#define HNS_RCB_RING_MIN_PENDING_BD 16
+
+#define HNS_RCB_REG_OFFSET 0x10000
+
+#define HNS_RCB_TX_FRAMES_LOW 1
+#define HNS_RCB_RX_FRAMES_LOW 1
+#define HNS_RCB_TX_FRAMES_HIGH 1023
+#define HNS_RCB_RX_FRAMES_HIGH 1023
+#define HNS_RCB_TX_USECS_LOW 1
+#define HNS_RCB_RX_USECS_LOW 1
+#define HNS_RCB_TX_USECS_HIGH 1023
+#define HNS_RCB_RX_USECS_HIGH 1023
+#define HNS_RCB_MAX_COALESCED_FRAMES 1023
+#define HNS_RCB_MIN_COALESCED_FRAMES 1
+#define HNS_RCB_DEF_RX_COALESCED_FRAMES 50
+#define HNS_RCB_DEF_TX_COALESCED_FRAMES 1
+#define HNS_RCB_CLK_FREQ_MHZ 350
+#define HNS_RCB_MAX_COALESCED_USECS 0x3ff
+#define HNS_RCB_DEF_COALESCED_USECS 30
+#define HNS_RCB_DEF_GAP_TIME_USECS 20
+#define HNS_RCB_TX_PKTLINE_OFFSET 8
+
+#define HNS_RCB_COMMON_ENDIAN 1
+
+#define HNS_BD_SIZE_512_TYPE 0
+#define HNS_BD_SIZE_1024_TYPE 1
+#define HNS_BD_SIZE_2048_TYPE 2
+#define HNS_BD_SIZE_4096_TYPE 3
+
+#define HNS_RCB_COMMON_DUMP_REG_NUM 80
+#define HNS_RCB_RING_DUMP_REG_NUM 40
+#define HNS_RING_STATIC_REG_NUM 28
+
+#define HNS_DUMP_REG_NUM 500
+#define HNS_STATIC_REG_NUM 12
+
+#define HNS_TSO_MODE_8BD_32K 1
+#define HNS_TSO_MDOE_4BD_16K 0
+
+enum rcb_int_flag {
+ RCB_INT_FLAG_TX = 0x1,
+ RCB_INT_FLAG_RX = (0x1 << 1),
+ RCB_INT_FLAG_MAX = (0x1 << 2), /*must be the last element */
+};
+
+struct hns_ring_hw_stats {
+ u64 tx_pkts;
+ u64 ppe_tx_ok_pkts;
+ u64 ppe_tx_drop_pkts;
+ u64 rx_pkts;
+ u64 ppe_rx_ok_pkts;
+ u64 ppe_rx_drop_pkts;
+};
+
+struct ring_pair_cb {
+ struct rcb_common_cb *rcb_common; /* ring belongs to */
+ struct device *dev; /*device for DMA mapping */
+ struct hnae_queue q;
+
+ u16 index; /* global index in a rcb common device */
+ u16 buf_size;
+
+ int virq[HNS_RCB_IRQ_NUM_PER_QUEUE];
+
+ u8 port_id_in_comm;
+ u8 used_by_vf;
+
+ struct hns_ring_hw_stats hw_stats;
+};
+
+struct rcb_common_cb {
+ u8 __iomem *io_base;
+ phys_addr_t phy_base;
+ struct dsaf_device *dsaf_dev;
+ u16 max_vfn;
+ u16 max_q_per_vf;
+
+ u8 comm_index;
+ u32 ring_num;
+ u32 desc_num; /* desc num per queue*/
+
+ struct ring_pair_cb ring_pair_cb[];
+};
+
+int hns_rcb_buf_size2type(u32 buf_size);
+
+int hns_rcb_common_get_cfg(struct dsaf_device *dsaf_dev, int comm_index);
+void hns_rcb_common_free_cfg(struct dsaf_device *dsaf_dev, u32 comm_index);
+int hns_rcb_common_init_hw(struct rcb_common_cb *rcb_common);
+void hns_rcb_start(struct hnae_queue *q, u32 val);
+int hns_rcb_get_cfg(struct rcb_common_cb *rcb_common);
+void hns_rcb_get_queue_mode(enum dsaf_mode dsaf_mode,
+ u16 *max_vfn, u16 *max_q_per_vf);
+
+void hns_rcb_common_init_commit_hw(struct rcb_common_cb *rcb_common);
+
+void hns_rcb_ring_enable_hw(struct hnae_queue *q, u32 val);
+void hns_rcb_int_clr_hw(struct hnae_queue *q, u32 flag);
+void hns_rcb_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 enable);
+void hns_rcbv2_int_ctrl_hw(struct hnae_queue *q, u32 flag, u32 mask);
+void hns_rcbv2_int_clr_hw(struct hnae_queue *q, u32 flag);
+
+void hns_rcb_init_hw(struct ring_pair_cb *ring);
+void hns_rcb_reset_ring_hw(struct hnae_queue *q);
+void hns_rcb_wait_fbd_clean(struct hnae_queue **qs, int q_num, u32 flag);
+int hns_rcb_wait_tx_ring_clean(struct hnae_queue *qs);
+u32 hns_rcb_get_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+u32 hns_rcb_get_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx);
+int hns_rcb_set_coalesce_usecs(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 timeout);
+int hns_rcb_set_rx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+int hns_rcb_set_tx_coalesced_frames(
+ struct rcb_common_cb *rcb_common, u32 port_idx, u32 coalesced_frames);
+void hns_rcb_update_stats(struct hnae_queue *queue);
+
+void hns_rcb_get_stats(struct hnae_queue *queue, u64 *data);
+
+void hns_rcb_get_common_regs(struct rcb_common_cb *rcb_common, void *data);
+
+int hns_rcb_get_ring_sset_count(int stringset);
+int hns_rcb_get_common_regs_count(void);
+int hns_rcb_get_ring_regs_count(void);
+
+void hns_rcb_get_ring_regs(struct hnae_queue *queue, void *data);
+
+void hns_rcb_get_strings(int stringset, u8 *data, int index);
+void hns_rcb_set_rx_ring_bs(struct hnae_queue *q, u32 buf_size);
+void hns_rcb_set_tx_ring_bs(struct hnae_queue *q, u32 buf_size);
+
+#endif /* _HNS_DSAF_RCB_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
new file mode 100644
index 000000000..47ccb6e0f
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_reg.h
@@ -0,0 +1,1095 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _DSAF_REG_H_
+#define _DSAF_REG_H_
+
+#include <linux/regmap.h>
+#define HNS_DEBUG_RING_IRQ_IDX 0
+#define HNS_SERVICE_RING_IRQ_IDX 59
+#define HNSV2_SERVICE_RING_IRQ_IDX 25
+
+#define DSAF_MAX_PORT_NUM 6
+#define DSAF_MAX_VM_NUM 128
+
+#define DSAF_COMM_DEV_NUM 1
+#define DSAF_PPE_INODE_BASE 6
+#define DSAF_DEBUG_NW_NUM 2
+#define DSAF_SERVICE_NW_NUM 6
+#define DSAF_COMM_CHN DSAF_SERVICE_NW_NUM
+#define DSAF_GE_NUM ((DSAF_SERVICE_NW_NUM) + (DSAF_DEBUG_NW_NUM))
+#define DSAF_XGE_NUM DSAF_SERVICE_NW_NUM
+#define DSAF_PORT_TYPE_NUM 3
+#define DSAF_NODE_NUM 18
+#define DSAF_XOD_BIG_NUM DSAF_NODE_NUM
+#define DSAF_SBM_NUM DSAF_NODE_NUM
+#define DSAFV2_SBM_NUM 8
+#define DSAFV2_SBM_XGE_CHN 6
+#define DSAFV2_SBM_PPE_CHN 1
+#define DASFV2_ROCEE_CRD_NUM 1
+
+#define DSAF_VOQ_NUM DSAF_NODE_NUM
+#define DSAF_INODE_NUM DSAF_NODE_NUM
+#define DSAF_XOD_NUM 8
+#define DSAF_TBL_NUM 8
+#define DSAF_SW_PORT_NUM 8
+#define DSAF_TOTAL_QUEUE_NUM 129
+
+/* a tcam entry is reserved for each port to support promisc via fuzzy match */
+#define DSAFV2_MAC_FUZZY_TCAM_NUM DSAF_MAX_PORT_NUM
+
+#define DSAF_TCAM_SUM 512
+#define DSAF_LINE_SUM (2048 * 14)
+
+#define DSAF_SUB_SC_NT_SRAM_CLK_SEL_REG 0x100
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG 0x180
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG 0x184
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL2_REG 0x188
+#define DSAF_SUB_SC_HILINK3_CRG_CTRL3_REG 0x18C
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG 0x190
+#define DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG 0x194
+#define DSAF_SUB_SC_DSAF_CLK_EN_REG 0x300
+#define DSAF_SUB_SC_DSAF_CLK_DIS_REG 0x304
+#define DSAF_SUB_SC_NT_CLK_EN_REG 0x308
+#define DSAF_SUB_SC_NT_CLK_DIS_REG 0x30C
+#define DSAF_SUB_SC_XGE_CLK_EN_REG 0x310
+#define DSAF_SUB_SC_XGE_CLK_DIS_REG 0x314
+#define DSAF_SUB_SC_GE_CLK_EN_REG 0x318
+#define DSAF_SUB_SC_GE_CLK_DIS_REG 0x31C
+#define DSAF_SUB_SC_PPE_CLK_EN_REG 0x320
+#define DSAF_SUB_SC_PPE_CLK_DIS_REG 0x324
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_EN_REG 0x350
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_DIS_REG 0x354
+#define DSAF_SUB_SC_XBAR_RESET_REQ_REG 0xA00
+#define DSAF_SUB_SC_XBAR_RESET_DREQ_REG 0xA04
+#define DSAF_SUB_SC_NT_RESET_REQ_REG 0xA08
+#define DSAF_SUB_SC_NT_RESET_DREQ_REG 0xA0C
+#define DSAF_SUB_SC_XGE_RESET_REQ_REG 0xA10
+#define DSAF_SUB_SC_XGE_RESET_DREQ_REG 0xA14
+#define DSAF_SUB_SC_GE_RESET_REQ0_REG 0xA18
+#define DSAF_SUB_SC_GE_RESET_DREQ0_REG 0xA1C
+#define DSAF_SUB_SC_GE_RESET_REQ1_REG 0xA20
+#define DSAF_SUB_SC_GE_RESET_DREQ1_REG 0xA24
+#define DSAF_SUB_SC_PPE_RESET_REQ_REG 0xA48
+#define DSAF_SUB_SC_PPE_RESET_DREQ_REG 0xA4C
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_REQ_REG 0xA88
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_DREQ_REG 0xA8C
+#define DSAF_SUB_SC_DSAF_RESET_REQ_REG 0xAA8
+#define DSAF_SUB_SC_DSAF_RESET_DREQ_REG 0xAAC
+#define DSAF_SUB_SC_ROCEE_RESET_REQ_REG 0xA50
+#define DSAF_SUB_SC_ROCEE_RESET_DREQ_REG 0xA54
+#define DSAF_SUB_SC_ROCEE_CLK_DIS_REG 0x32C
+#define DSAF_SUB_SC_ROCEE_CLK_EN_REG 0x328
+#define DSAF_SUB_SC_LIGHT_MODULE_DETECT_EN_REG 0x2060
+#define DSAF_SUB_SC_TCAM_MBIST_EN_REG 0x2300
+#define DSAF_SUB_SC_DSAF_CLK_ST_REG 0x5300
+#define DSAF_SUB_SC_NT_CLK_ST_REG 0x5304
+#define DSAF_SUB_SC_XGE_CLK_ST_REG 0x5308
+#define DSAF_SUB_SC_GE_CLK_ST_REG 0x530C
+#define DSAF_SUB_SC_PPE_CLK_ST_REG 0x5310
+#define DSAF_SUB_SC_ROCEE_CLK_ST_REG 0x5314
+#define DSAF_SUB_SC_CPU_CLK_ST_REG 0x5318
+#define DSAF_SUB_SC_RCB_PPE_COM_CLK_ST_REG 0x5328
+#define DSAF_SUB_SC_XBAR_RESET_ST_REG 0x5A00
+#define DSAF_SUB_SC_NT_RESET_ST_REG 0x5A04
+#define DSAF_SUB_SC_XGE_RESET_ST_REG 0x5A08
+#define DSAF_SUB_SC_GE_RESET_ST0_REG 0x5A0C
+#define DSAF_SUB_SC_GE_RESET_ST1_REG 0x5A10
+#define DSAF_SUB_SC_PPE_RESET_ST_REG 0x5A24
+#define DSAF_SUB_SC_RCB_PPE_COM_RESET_ST_REG 0x5A44
+
+/* serdes offset */
+#define HNS_MAC_HILINK3_REG DSAF_SUB_SC_HILINK3_CRG_CTRL0_REG
+#define HNS_MAC_HILINK4_REG DSAF_SUB_SC_HILINK4_CRG_CTRL0_REG
+#define HNS_MAC_HILINK3V2_REG DSAF_SUB_SC_HILINK3_CRG_CTRL1_REG
+#define HNS_MAC_HILINK4V2_REG DSAF_SUB_SC_HILINK4_CRG_CTRL1_REG
+#define HNS_MAC_LANE0_CTLEDFE_REG 0x000BFFCCULL
+#define HNS_MAC_LANE1_CTLEDFE_REG 0x000BFFBCULL
+#define HNS_MAC_LANE2_CTLEDFE_REG 0x000BFFACULL
+#define HNS_MAC_LANE3_CTLEDFE_REG 0x000BFF9CULL
+#define HNS_MAC_LANE0_STATE_REG 0x000BFFD4ULL
+#define HNS_MAC_LANE1_STATE_REG 0x000BFFC4ULL
+#define HNS_MAC_LANE2_STATE_REG 0x000BFFB4ULL
+#define HNS_MAC_LANE3_STATE_REG 0x000BFFA4ULL
+
+#define HILINK_RESET_TIMOUT 10000
+
+#define DSAF_SRAM_INIT_OVER_0_REG 0x0
+#define DSAF_CFG_0_REG 0x4
+#define DSAF_ECC_ERR_INVERT_0_REG 0x8
+#define DSAF_ABNORMAL_TIMEOUT_0_REG 0x1C
+#define DSAF_FSM_TIMEOUT_0_REG 0x20
+#define DSAF_DSA_REG_CNT_CLR_CE_REG 0x2C
+#define DSAF_DSA_SBM_INF_FIFO_THRD_REG 0x30
+#define DSAF_DSA_SRAM_1BIT_ECC_SEL_REG 0x34
+#define DSAF_DSA_SRAM_1BIT_ECC_CNT_REG 0x38
+#define DSAF_PFC_EN_0_REG 0x50
+#define DSAF_PFC_UNIT_CNT_0_REG 0x70
+#define DSAF_XGE_INT_MSK_0_REG 0x100
+#define DSAF_PPE_INT_MSK_0_REG 0x120
+#define DSAF_ROCEE_INT_MSK_0_REG 0x140
+#define DSAF_XGE_INT_SRC_0_REG 0x160
+#define DSAF_PPE_INT_SRC_0_REG 0x180
+#define DSAF_ROCEE_INT_SRC_0_REG 0x1A0
+#define DSAF_XGE_INT_STS_0_REG 0x1C0
+#define DSAF_PPE_INT_STS_0_REG 0x1E0
+#define DSAF_ROCEE_INT_STS_0_REG 0x200
+#define DSAFV2_SERDES_LBK_0_REG 0x220
+#define DSAF_PAUSE_CFG_REG 0x240
+#define DSAF_ROCE_PORT_MAP_REG 0x2A0
+#define DSAF_ROCE_SL_MAP_REG 0x2A4
+#define DSAF_PPE_QID_CFG_0_REG 0x300
+#define DSAF_SW_PORT_TYPE_0_REG 0x320
+#define DSAF_STP_PORT_TYPE_0_REG 0x340
+#define DSAF_MIX_DEF_QID_0_REG 0x360
+#define DSAF_PORT_DEF_VLAN_0_REG 0x380
+#define DSAF_VM_DEF_VLAN_0_REG 0x400
+
+#define DSAF_INODE_CUT_THROUGH_CFG_0_REG 0x1000
+#define DSAF_INODE_ECC_INVERT_EN_0_REG 0x1008
+#define DSAF_INODE_ECC_ERR_ADDR_0_REG 0x100C
+#define DSAF_INODE_IN_PORT_NUM_0_REG 0x1018
+#define DSAF_INODE_PRI_TC_CFG_0_REG 0x101C
+#define DSAF_INODE_BP_STATUS_0_REG 0x1020
+#define DSAF_INODE_PAD_DISCARD_NUM_0_REG 0x1028
+#define DSAF_INODE_FINAL_IN_MAN_NUM_0_REG 0x102C
+#define DSAF_INODE_FINAL_IN_PKT_NUM_0_REG 0x1030
+#define DSAF_INODE_SBM_PID_NUM_0_REG 0x1038
+#define DSAF_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x103C
+#define DSAFV2_INODE_FINAL_IN_PAUSE_NUM_0_REG 0x1024
+#define DSAF_INODE_SBM_RELS_NUM_0_REG 0x104C
+#define DSAF_INODE_SBM_DROP_NUM_0_REG 0x1050
+#define DSAF_INODE_CRC_FALSE_NUM_0_REG 0x1054
+#define DSAF_INODE_BP_DISCARD_NUM_0_REG 0x1058
+#define DSAF_INODE_RSLT_DISCARD_NUM_0_REG 0x105C
+#define DSAF_INODE_LOCAL_ADDR_FALSE_NUM_0_REG 0x1060
+#define DSAF_INODE_VOQ_OVER_NUM_0_REG 0x1068
+#define DSAF_INODE_BD_SAVE_STATUS_0_REG 0x1900
+#define DSAF_INODE_BD_ORDER_STATUS_0_REG 0x1950
+#define DSAF_INODE_SW_VLAN_TAG_DISC_0_REG 0x1A00
+#define DSAF_INODE_IN_DATA_STP_DISC_0_REG 0x1A50
+#define DSAF_INODE_GE_FC_EN_0_REG 0x1B00
+#define DSAF_INODE_VC0_IN_PKT_NUM_0_REG 0x1B50
+#define DSAF_INODE_VC1_IN_PKT_NUM_0_REG 0x103C
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_REG 0x1C00
+#define DSAF_INODE_IN_PRIO_PAUSE_BASE_OFFSET 0x100
+#define DSAF_INODE_IN_PRIO_PAUSE_OFFSET 0x50
+
+#define DSAF_SBM_CFG_REG_0_REG 0x2000
+#define DSAF_SBM_BP_CFG_0_XGE_REG_0_REG 0x2004
+#define DSAF_SBM_BP_CFG_0_PPE_REG_0_REG 0x2304
+#define DSAF_SBM_BP_CFG_0_ROCEE_REG_0_REG 0x2604
+#define DSAF_SBM_BP_CFG_1_REG_0_REG 0x2008
+#define DSAF_SBM_BP_CFG_2_XGE_REG_0_REG 0x200C
+#define DSAF_SBM_BP_CFG_2_PPE_REG_0_REG 0x230C
+#define DSAF_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x260C
+#define DSAF_SBM_ROCEE_CFG_REG_REG 0x2380
+#define DSAFV2_SBM_BP_CFG_2_ROCEE_REG_0_REG 0x238C
+#define DSAF_SBM_FREE_CNT_0_0_REG 0x2010
+#define DSAF_SBM_FREE_CNT_1_0_REG 0x2014
+#define DSAF_SBM_BP_CNT_0_0_REG 0x2018
+#define DSAF_SBM_BP_CNT_1_0_REG 0x201C
+#define DSAF_SBM_BP_CNT_2_0_REG 0x2020
+#define DSAF_SBM_BP_CNT_3_0_REG 0x2024
+#define DSAF_SBM_INER_ST_0_REG 0x2028
+#define DSAF_SBM_MIB_REQ_FAILED_TC_0_REG 0x202C
+#define DSAF_SBM_LNK_INPORT_CNT_0_REG 0x2030
+#define DSAF_SBM_LNK_DROP_CNT_0_REG 0x2034
+#define DSAF_SBM_INF_OUTPORT_CNT_0_REG 0x2038
+#define DSAF_SBM_LNK_INPORT_TC0_CNT_0_REG 0x203C
+#define DSAF_SBM_LNK_INPORT_TC1_CNT_0_REG 0x2040
+#define DSAF_SBM_LNK_INPORT_TC2_CNT_0_REG 0x2044
+#define DSAF_SBM_LNK_INPORT_TC3_CNT_0_REG 0x2048
+#define DSAF_SBM_LNK_INPORT_TC4_CNT_0_REG 0x204C
+#define DSAF_SBM_LNK_INPORT_TC5_CNT_0_REG 0x2050
+#define DSAF_SBM_LNK_INPORT_TC6_CNT_0_REG 0x2054
+#define DSAF_SBM_LNK_INPORT_TC7_CNT_0_REG 0x2058
+#define DSAF_SBM_LNK_REQ_CNT_0_REG 0x205C
+#define DSAF_SBM_LNK_RELS_CNT_0_REG 0x2060
+#define DSAF_SBM_BP_CFG_3_REG_0_REG 0x2068
+#define DSAF_SBM_BP_CFG_4_REG_0_REG 0x206C
+
+#define DSAF_XOD_ETS_TSA_TC0_TC3_CFG_0_REG 0x3000
+#define DSAF_XOD_ETS_TSA_TC4_TC7_CFG_0_REG 0x3004
+#define DSAF_XOD_ETS_BW_TC0_TC3_CFG_0_REG 0x3008
+#define DSAF_XOD_ETS_BW_TC4_TC7_CFG_0_REG 0x300C
+#define DSAF_XOD_ETS_BW_OFFSET_CFG_0_REG 0x3010
+#define DSAF_XOD_ETS_TOKEN_CFG_0_REG 0x3014
+#define DSAF_XOD_PFS_CFG_0_0_REG 0x3018
+#define DSAF_XOD_PFS_CFG_1_0_REG 0x301C
+#define DSAF_XOD_PFS_CFG_2_0_REG 0x3020
+#define DSAF_XOD_GNT_L_0_REG 0x3024
+#define DSAF_XOD_GNT_H_0_REG 0x3028
+#define DSAF_XOD_CONNECT_STATE_0_REG 0x302C
+#define DSAF_XOD_RCVPKT_CNT_0_REG 0x3030
+#define DSAF_XOD_RCVTC0_CNT_0_REG 0x3034
+#define DSAF_XOD_RCVTC1_CNT_0_REG 0x3038
+#define DSAF_XOD_RCVTC2_CNT_0_REG 0x303C
+#define DSAF_XOD_RCVTC3_CNT_0_REG 0x3040
+#define DSAF_XOD_RCVVC0_CNT_0_REG 0x3044
+#define DSAF_XOD_RCVVC1_CNT_0_REG 0x3048
+#define DSAF_XOD_XGE_RCVIN0_CNT_0_REG 0x304C
+#define DSAF_XOD_XGE_RCVIN1_CNT_0_REG 0x3050
+#define DSAF_XOD_XGE_RCVIN2_CNT_0_REG 0x3054
+#define DSAF_XOD_XGE_RCVIN3_CNT_0_REG 0x3058
+#define DSAF_XOD_XGE_RCVIN4_CNT_0_REG 0x305C
+#define DSAF_XOD_XGE_RCVIN5_CNT_0_REG 0x3060
+#define DSAF_XOD_XGE_RCVIN6_CNT_0_REG 0x3064
+#define DSAF_XOD_XGE_RCVIN7_CNT_0_REG 0x3068
+#define DSAF_XOD_PPE_RCVIN0_CNT_0_REG 0x306C
+#define DSAF_XOD_PPE_RCVIN1_CNT_0_REG 0x3070
+#define DSAF_XOD_ROCEE_RCVIN0_CNT_0_REG 0x3074
+#define DSAF_XOD_ROCEE_RCVIN1_CNT_0_REG 0x3078
+#define DSAF_XOD_FIFO_STATUS_0_REG 0x307C
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_BASE_REG 0x3A00
+#define DSAF_XOD_XGE_PFC_PRIO_CNT_OFFSET 0x4
+
+#define DSAF_VOQ_ECC_INVERT_EN_0_REG 0x4004
+#define DSAF_VOQ_SRAM_PKT_NUM_0_REG 0x4008
+#define DSAF_VOQ_IN_PKT_NUM_0_REG 0x400C
+#define DSAF_VOQ_OUT_PKT_NUM_0_REG 0x4010
+#define DSAF_VOQ_ECC_ERR_ADDR_0_REG 0x4014
+#define DSAF_VOQ_BP_STATUS_0_REG 0x4018
+#define DSAF_VOQ_SPUP_IDLE_0_REG 0x401C
+#define DSAF_VOQ_XGE_XOD_REQ_0_0_REG 0x4024
+#define DSAF_VOQ_XGE_XOD_REQ_1_0_REG 0x4028
+#define DSAF_VOQ_PPE_XOD_REQ_0_REG 0x402C
+#define DSAF_VOQ_ROCEE_XOD_REQ_0_REG 0x4030
+#define DSAF_VOQ_BP_ALL_THRD_0_REG 0x4034
+
+#define DSAF_TBL_CTRL_0_REG 0x5000
+#define DSAF_TBL_INT_MSK_0_REG 0x5004
+#define DSAF_TBL_INT_SRC_0_REG 0x5008
+#define DSAF_TBL_INT_STS_0_REG 0x5100
+#define DSAF_TBL_TCAM_ADDR_0_REG 0x500C
+#define DSAF_TBL_LINE_ADDR_0_REG 0x5010
+#define DSAF_TBL_TCAM_HIGH_0_REG 0x5014
+#define DSAF_TBL_TCAM_LOW_0_REG 0x5018
+#define DSAF_TBL_TCAM_MCAST_CFG_4_0_REG 0x501C
+#define DSAF_TBL_TCAM_MCAST_CFG_3_0_REG 0x5020
+#define DSAF_TBL_TCAM_MCAST_CFG_2_0_REG 0x5024
+#define DSAF_TBL_TCAM_MCAST_CFG_1_0_REG 0x5028
+#define DSAF_TBL_TCAM_MCAST_CFG_0_0_REG 0x502C
+#define DSAF_TBL_TCAM_UCAST_CFG_0_REG 0x5030
+#define DSAF_TBL_LIN_CFG_0_REG 0x5034
+#define DSAF_TBL_TCAM_RDATA_HIGH_0_REG 0x5038
+#define DSAF_TBL_TCAM_RDATA_LOW_0_REG 0x503C
+#define DSAF_TBL_TCAM_RAM_RDATA4_0_REG 0x5040
+#define DSAF_TBL_TCAM_RAM_RDATA3_0_REG 0x5044
+#define DSAF_TBL_TCAM_RAM_RDATA2_0_REG 0x5048
+#define DSAF_TBL_TCAM_RAM_RDATA1_0_REG 0x504C
+#define DSAF_TBL_TCAM_RAM_RDATA0_0_REG 0x5050
+#define DSAF_TBL_LIN_RDATA_0_REG 0x5054
+#define DSAF_TBL_DA0_MIS_INFO1_0_REG 0x5058
+#define DSAF_TBL_DA0_MIS_INFO0_0_REG 0x505C
+#define DSAF_TBL_SA_MIS_INFO2_0_REG 0x5104
+#define DSAF_TBL_SA_MIS_INFO1_0_REG 0x5098
+#define DSAF_TBL_SA_MIS_INFO0_0_REG 0x509C
+#define DSAF_TBL_PUL_0_REG 0x50A0
+#define DSAF_TBL_OLD_RSLT_0_REG 0x50A4
+#define DSAF_TBL_OLD_SCAN_VAL_0_REG 0x50A8
+#define DSAF_TBL_DFX_CTRL_0_REG 0x50AC
+#define DSAF_TBL_DFX_STAT_0_REG 0x50B0
+#define DSAF_TBL_DFX_STAT_2_0_REG 0x5108
+#define DSAF_TBL_LKUP_NUM_I_0_REG 0x50C0
+#define DSAF_TBL_LKUP_NUM_O_0_REG 0x50E0
+#define DSAF_TBL_UCAST_BCAST_MIS_INFO_0_0_REG 0x510C
+#define DSAF_TBL_TCAM_MATCH_CFG_H_REG 0x5130
+#define DSAF_TBL_TCAM_MATCH_CFG_L_REG 0x5134
+
+#define DSAF_INODE_FIFO_WL_0_REG 0x6000
+#define DSAF_ONODE_FIFO_WL_0_REG 0x6020
+#define DSAF_XGE_GE_WORK_MODE_0_REG 0x6040
+#define DSAF_XGE_APP_RX_LINK_UP_0_REG 0x6080
+#define DSAF_NETPORT_CTRL_SIG_0_REG 0x60A0
+#define DSAF_XGE_CTRL_SIG_CFG_0_REG 0x60C0
+
+#define PPE_COM_CFG_QID_MODE_REG 0x0
+#define PPE_COM_INTEN_REG 0x110
+#define PPE_COM_RINT_REG 0x114
+#define PPE_COM_INTSTS_REG 0x118
+#define PPE_COM_HIS_RX_PKT_QID_DROP_CNT_REG 0x300
+#define PPE_COM_HIS_RX_PKT_QID_OK_CNT_REG 0x600
+#define PPE_COM_HIS_TX_PKT_QID_ERR_CNT_REG 0x900
+#define PPE_COM_HIS_TX_PKT_QID_OK_CNT_REG 0xC00
+#define PPE_COM_COMMON_CNT_CLR_CE_REG 0x1120
+
+#define PPE_CFG_TX_FIFO_THRSLD_REG 0x0
+#define PPE_CFG_RX_FIFO_THRSLD_REG 0x4
+#define PPE_CFG_RX_FIFO_PAUSE_THRSLD_REG 0x8
+#define PPE_CFG_RX_FIFO_SW_BP_THRSLD_REG 0xC
+#define PPE_CFG_PAUSE_IDLE_CNT_REG 0x10
+#define PPE_CFG_BUS_CTRL_REG 0x40
+#define PPE_CFG_TNL_TO_BE_RST_REG 0x48
+#define PPE_CURR_TNL_CAN_RST_REG 0x4C
+#define PPE_CFG_XGE_MODE_REG 0x80
+#define PPE_CFG_MAX_FRAME_LEN_REG 0x84
+#define PPE_CFG_RX_PKT_MODE_REG 0x88
+#define PPE_CFG_RX_VLAN_TAG_REG 0x8C
+#define PPE_CFG_TAG_GEN_REG 0x90
+#define PPE_CFG_PARSE_TAG_REG 0x94
+#define PPE_CFG_PRO_CHECK_EN_REG 0x98
+#define PPEV2_CFG_TSO_EN_REG 0xA0
+#define PPEV2_VLAN_STRIP_EN_REG 0xAC
+#define PPE_INTEN_REG 0x100
+#define PPE_RINT_REG 0x104
+#define PPE_INTSTS_REG 0x108
+#define PPE_CFG_RX_PKT_INT_REG 0x140
+#define PPE_CFG_HEAT_DECT_TIME0_REG 0x144
+#define PPE_CFG_HEAT_DECT_TIME1_REG 0x148
+#define PPE_HIS_RX_SW_PKT_CNT_REG 0x200
+#define PPE_HIS_RX_WR_BD_OK_PKT_CNT_REG 0x204
+#define PPE_HIS_RX_PKT_NO_BUF_CNT_REG 0x208
+#define PPE_HIS_TX_BD_CNT_REG 0x20C
+#define PPE_HIS_TX_PKT_CNT_REG 0x210
+#define PPE_HIS_TX_PKT_OK_CNT_REG 0x214
+#define PPE_HIS_TX_PKT_EPT_CNT_REG 0x218
+#define PPE_HIS_TX_PKT_CS_FAIL_CNT_REG 0x21C
+#define PPE_HIS_RX_APP_BUF_FAIL_CNT_REG 0x220
+#define PPE_HIS_RX_APP_BUF_WAIT_CNT_REG 0x224
+#define PPE_HIS_RX_PKT_DROP_FUL_CNT_REG 0x228
+#define PPE_HIS_RX_PKT_DROP_PRT_CNT_REG 0x22C
+#define PPE_TNL_0_5_CNT_CLR_CE_REG 0x300
+#define PPE_CFG_AXI_DBG_REG 0x304
+#define PPE_HIS_PRO_ERR_REG 0x308
+#define PPE_HIS_TNL_FIFO_ERR_REG 0x30C
+#define PPE_CURR_CFF_DATA_NUM_REG 0x310
+#define PPE_CURR_RX_ST_REG 0x314
+#define PPE_CURR_TX_ST_REG 0x318
+#define PPE_CURR_RX_FIFO0_REG 0x31C
+#define PPE_CURR_RX_FIFO1_REG 0x320
+#define PPE_CURR_TX_FIFO0_REG 0x324
+#define PPE_CURR_TX_FIFO1_REG 0x328
+#define PPE_ECO0_REG 0x32C
+#define PPE_ECO1_REG 0x330
+#define PPE_ECO2_REG 0x334
+#define PPEV2_INDRECTION_TBL_REG 0x800
+#define PPEV2_RSS_KEY_REG 0x900
+
+#define RCB_COM_CFG_ENDIAN_REG 0x0
+#define RCB_COM_CFG_SYS_FSH_REG 0xC
+#define RCB_COM_CFG_INIT_FLAG_REG 0x10
+#define RCB_COM_CFG_PKT_REG 0x30
+#define RCB_COM_CFG_RINVLD_REG 0x34
+#define RCB_COM_CFG_FNA_REG 0x38
+#define RCB_COM_CFG_FA_REG 0x3C
+#define RCB_COM_CFG_PKT_TC_BP_REG 0x40
+#define RCB_COM_CFG_PPE_TNL_CLKEN_REG 0x44
+#define RCBV2_COM_CFG_USER_REG 0x30
+#define RCBV2_COM_CFG_TSO_MODE_REG 0x50
+
+#define RCB_COM_INTMSK_TX_PKT_REG 0x3A0
+#define RCB_COM_RINT_TX_PKT_REG 0x3A8
+#define RCB_COM_INTMASK_ECC_ERR_REG 0x400
+#define RCB_COM_INTSTS_ECC_ERR_REG 0x408
+#define RCB_COM_EBD_SRAM_ERR_REG 0x410
+#define RCB_COM_RXRING_ERR_REG 0x41C
+#define RCB_COM_TXRING_ERR_REG 0x420
+#define RCB_COM_TX_FBD_ERR_REG 0x424
+#define RCB_SRAM_ECC_CHK_EN_REG 0x428
+#define RCB_SRAM_ECC_CHK0_REG 0x42C
+#define RCB_SRAM_ECC_CHK1_REG 0x430
+#define RCB_SRAM_ECC_CHK2_REG 0x434
+#define RCB_SRAM_ECC_CHK3_REG 0x438
+#define RCB_SRAM_ECC_CHK4_REG 0x43c
+#define RCB_SRAM_ECC_CHK5_REG 0x440
+#define RCB_ECC_ERR_ADDR0_REG 0x450
+#define RCB_ECC_ERR_ADDR3_REG 0x45C
+#define RCB_ECC_ERR_ADDR4_REG 0x460
+#define RCB_ECC_ERR_ADDR5_REG 0x464
+
+#define RCB_COM_SF_CFG_INTMASK_RING 0x470
+#define RCB_COM_SF_CFG_RING_STS 0x474
+#define RCB_COM_SF_CFG_RING 0x478
+#define RCB_COM_SF_CFG_INTMASK_BD 0x47C
+#define RCB_COM_SF_CFG_BD_RINT_STS 0x480
+#define RCB_COM_RCB_RD_BD_BUSY 0x490
+#define RCB_COM_RCB_FBD_CRT_EN 0x494
+#define RCB_COM_AXI_WR_ERR_INTMASK 0x498
+#define RCB_COM_AXI_ERR_STS 0x49C
+#define RCB_COM_CHK_TX_FBD_NUM_REG 0x4a0
+
+#define RCB_CFG_BD_NUM_REG 0x9000
+#define RCB_CFG_PKTLINE_REG 0x9050
+
+#define RCB_CFG_OVERTIME_REG 0x9300
+#define RCB_CFG_PKTLINE_INT_NUM_REG 0x9304
+#define RCB_CFG_OVERTIME_INT_NUM_REG 0x9308
+#define RCB_PORT_INT_GAPTIME_REG 0x9400
+#define RCB_PORT_CFG_OVERTIME_REG 0x9430
+
+#define RCB_RING_RX_RING_BASEADDR_L_REG 0x00000
+#define RCB_RING_RX_RING_BASEADDR_H_REG 0x00004
+#define RCB_RING_RX_RING_BD_NUM_REG 0x00008
+#define RCB_RING_RX_RING_BD_LEN_REG 0x0000C
+#define RCB_RING_RX_RING_PKTLINE_REG 0x00010
+#define RCB_RING_RX_RING_TAIL_REG 0x00018
+#define RCB_RING_RX_RING_HEAD_REG 0x0001C
+#define RCB_RING_RX_RING_FBDNUM_REG 0x00020
+#define RCB_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
+
+#define RCB_RING_TX_RING_BASEADDR_L_REG 0x00040
+#define RCB_RING_TX_RING_BASEADDR_H_REG 0x00044
+#define RCB_RING_TX_RING_BD_NUM_REG 0x00048
+#define RCB_RING_TX_RING_BD_LEN_REG 0x0004C
+#define RCB_RING_TX_RING_PKTLINE_REG 0x00050
+#define RCB_RING_TX_RING_TAIL_REG 0x00058
+#define RCB_RING_TX_RING_HEAD_REG 0x0005C
+#define RCB_RING_TX_RING_FBDNUM_REG 0x00060
+#define RCB_RING_TX_RING_OFFSET_REG 0x00064
+#define RCB_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+
+#define RCB_RING_PREFETCH_EN_REG 0x0007C
+#define RCB_RING_CFG_VF_NUM_REG 0x00080
+#define RCB_RING_ASID_REG 0x0008C
+#define RCB_RING_RX_VM_REG 0x00090
+#define RCB_RING_T0_BE_RST 0x00094
+#define RCB_RING_COULD_BE_RST 0x00098
+#define RCB_RING_WRR_WEIGHT_REG 0x0009c
+
+#define RCB_RING_INTMSK_RXWL_REG 0x000A0
+#define RCB_RING_INTSTS_RX_RING_REG 0x000A4
+#define RCBV2_RX_RING_INT_STS_REG 0x000A8
+#define RCB_RING_INTMSK_TXWL_REG 0x000AC
+#define RCB_RING_INTSTS_TX_RING_REG 0x000B0
+#define RCBV2_TX_RING_INT_STS_REG 0x000B4
+#define RCB_RING_INTMSK_RX_OVERTIME_REG 0x000B8
+#define RCB_RING_INTSTS_RX_OVERTIME_REG 0x000BC
+#define RCB_RING_INTMSK_TX_OVERTIME_REG 0x000C4
+#define RCB_RING_INTSTS_TX_OVERTIME_REG 0x000C8
+
+#define GMAC_FIFO_STATE_REG 0x0000UL
+#define GMAC_DUPLEX_TYPE_REG 0x0008UL
+#define GMAC_FD_FC_TYPE_REG 0x000CUL
+#define GMAC_TX_WATER_LINE_REG 0x0010UL
+#define GMAC_FC_TX_TIMER_REG 0x001CUL
+#define GMAC_FD_FC_ADDR_LOW_REG 0x0020UL
+#define GMAC_FD_FC_ADDR_HIGH_REG 0x0024UL
+#define GMAC_IPG_TX_TIMER_REG 0x0030UL
+#define GMAC_PAUSE_THR_REG 0x0038UL
+#define GMAC_MAX_FRM_SIZE_REG 0x003CUL
+#define GMAC_PORT_MODE_REG 0x0040UL
+#define GMAC_PORT_EN_REG 0x0044UL
+#define GMAC_PAUSE_EN_REG 0x0048UL
+#define GMAC_SHORT_RUNTS_THR_REG 0x0050UL
+#define GMAC_AN_NEG_STATE_REG 0x0058UL
+#define GMAC_TX_LOCAL_PAGE_REG 0x005CUL
+#define GMAC_TRANSMIT_CONTROL_REG 0x0060UL
+#define GMAC_REC_FILT_CONTROL_REG 0x0064UL
+#define GMAC_PTP_CONFIG_REG 0x0074UL
+
+#define GMAC_RX_OCTETS_TOTAL_OK_REG 0x0080UL
+#define GMAC_RX_OCTETS_BAD_REG 0x0084UL
+#define GMAC_RX_UC_PKTS_REG 0x0088UL
+#define GMAC_RX_MC_PKTS_REG 0x008CUL
+#define GMAC_RX_BC_PKTS_REG 0x0090UL
+#define GMAC_RX_PKTS_64OCTETS_REG 0x0094UL
+#define GMAC_RX_PKTS_65TO127OCTETS_REG 0x0098UL
+#define GMAC_RX_PKTS_128TO255OCTETS_REG 0x009CUL
+#define GMAC_RX_PKTS_255TO511OCTETS_REG 0x00A0UL
+#define GMAC_RX_PKTS_512TO1023OCTETS_REG 0x00A4UL
+#define GMAC_RX_PKTS_1024TO1518OCTETS_REG 0x00A8UL
+#define GMAC_RX_PKTS_1519TOMAXOCTETS_REG 0x00ACUL
+#define GMAC_RX_FCS_ERRORS_REG 0x00B0UL
+#define GMAC_RX_TAGGED_REG 0x00B4UL
+#define GMAC_RX_DATA_ERR_REG 0x00B8UL
+#define GMAC_RX_ALIGN_ERRORS_REG 0x00BCUL
+#define GMAC_RX_LONG_ERRORS_REG 0x00C0UL
+#define GMAC_RX_JABBER_ERRORS_REG 0x00C4UL
+#define GMAC_RX_PAUSE_MACCTRL_FRAM_REG 0x00C8UL
+#define GMAC_RX_UNKNOWN_MACCTRL_FRAM_REG 0x00CCUL
+#define GMAC_RX_VERY_LONG_ERR_CNT_REG 0x00D0UL
+#define GMAC_RX_RUNT_ERR_CNT_REG 0x00D4UL
+#define GMAC_RX_SHORT_ERR_CNT_REG 0x00D8UL
+#define GMAC_RX_FILT_PKT_CNT_REG 0x00E8UL
+#define GMAC_RX_OCTETS_TOTAL_FILT_REG 0x00ECUL
+#define GMAC_OCTETS_TRANSMITTED_OK_REG 0x0100UL
+#define GMAC_OCTETS_TRANSMITTED_BAD_REG 0x0104UL
+#define GMAC_TX_UC_PKTS_REG 0x0108UL
+#define GMAC_TX_MC_PKTS_REG 0x010CUL
+#define GMAC_TX_BC_PKTS_REG 0x0110UL
+#define GMAC_TX_PKTS_64OCTETS_REG 0x0114UL
+#define GMAC_TX_PKTS_65TO127OCTETS_REG 0x0118UL
+#define GMAC_TX_PKTS_128TO255OCTETS_REG 0x011CUL
+#define GMAC_TX_PKTS_255TO511OCTETS_REG 0x0120UL
+#define GMAC_TX_PKTS_512TO1023OCTETS_REG 0x0124UL
+#define GMAC_TX_PKTS_1024TO1518OCTETS_REG 0x0128UL
+#define GMAC_TX_PKTS_1519TOMAXOCTETS_REG 0x012CUL
+#define GMAC_TX_EXCESSIVE_LENGTH_DROP_REG 0x014CUL
+#define GMAC_TX_UNDERRUN_REG 0x0150UL
+#define GMAC_TX_TAGGED_REG 0x0154UL
+#define GMAC_TX_CRC_ERROR_REG 0x0158UL
+#define GMAC_TX_PAUSE_FRAMES_REG 0x015CUL
+#define GAMC_RX_MAX_FRAME 0x0170UL
+#define GMAC_LINE_LOOP_BACK_REG 0x01A8UL
+#define GMAC_CF_CRC_STRIP_REG 0x01B0UL
+#define GMAC_MODE_CHANGE_EN_REG 0x01B4UL
+#define GMAC_SIXTEEN_BIT_CNTR_REG 0x01CCUL
+#define GMAC_LD_LINK_COUNTER_REG 0x01D0UL
+#define GMAC_LOOP_REG 0x01DCUL
+#define GMAC_RECV_CONTROL_REG 0x01E0UL
+#define GMAC_PCS_RX_EN_REG 0x01E4UL
+#define GMAC_VLAN_CODE_REG 0x01E8UL
+#define GMAC_RX_OVERRUN_CNT_REG 0x01ECUL
+#define GMAC_RX_LENGTHFIELD_ERR_CNT_REG 0x01F4UL
+#define GMAC_RX_FAIL_COMMA_CNT_REG 0x01F8UL
+#define GMAC_STATION_ADDR_LOW_0_REG 0x0200UL
+#define GMAC_STATION_ADDR_HIGH_0_REG 0x0204UL
+#define GMAC_STATION_ADDR_LOW_1_REG 0x0208UL
+#define GMAC_STATION_ADDR_HIGH_1_REG 0x020CUL
+#define GMAC_STATION_ADDR_LOW_2_REG 0x0210UL
+#define GMAC_STATION_ADDR_HIGH_2_REG 0x0214UL
+#define GMAC_STATION_ADDR_LOW_3_REG 0x0218UL
+#define GMAC_STATION_ADDR_HIGH_3_REG 0x021CUL
+#define GMAC_STATION_ADDR_LOW_4_REG 0x0220UL
+#define GMAC_STATION_ADDR_HIGH_4_REG 0x0224UL
+#define GMAC_STATION_ADDR_LOW_5_REG 0x0228UL
+#define GMAC_STATION_ADDR_HIGH_5_REG 0x022CUL
+#define GMAC_STATION_ADDR_LOW_MSK_0_REG 0x0230UL
+#define GMAC_STATION_ADDR_HIGH_MSK_0_REG 0x0234UL
+#define GMAC_STATION_ADDR_LOW_MSK_1_REG 0x0238UL
+#define GMAC_STATION_ADDR_HIGH_MSK_1_REG 0x023CUL
+#define GMAC_MAC_SKIP_LEN_REG 0x0240UL
+#define GMAC_TX_LOOP_PKT_PRI_REG 0x0378UL
+
+#define XGMAC_INT_STATUS_REG 0x0
+#define XGMAC_INT_ENABLE_REG 0x4
+#define XGMAC_INT_SET_REG 0x8
+#define XGMAC_IERR_U_INFO_REG 0xC
+#define XGMAC_OVF_INFO_REG 0x10
+#define XGMAC_OVF_CNT_REG 0x14
+#define XGMAC_PORT_MODE_REG 0x40
+#define XGMAC_CLK_ENABLE_REG 0x44
+#define XGMAC_RESET_REG 0x48
+#define XGMAC_LINK_CONTROL_REG 0x50
+#define XGMAC_LINK_STATUS_REG 0x54
+#define XGMAC_SPARE_REG 0xC0
+#define XGMAC_SPARE_CNT_REG 0xC4
+
+#define XGMAC_MAC_ENABLE_REG 0x100
+#define XGMAC_MAC_CONTROL_REG 0x104
+#define XGMAC_MAC_IPG_REG 0x120
+#define XGMAC_MAC_MSG_CRC_EN_REG 0x124
+#define XGMAC_MAC_MSG_IMG_REG 0x128
+#define XGMAC_MAC_MSG_FC_CFG_REG 0x12C
+#define XGMAC_MAC_MSG_TC_CFG_REG 0x130
+#define XGMAC_MAC_PAD_SIZE_REG 0x134
+#define XGMAC_MAC_MIN_PKT_SIZE_REG 0x138
+#define XGMAC_MAC_MAX_PKT_SIZE_REG 0x13C
+#define XGMAC_MAC_PAUSE_CTRL_REG 0x160
+#define XGMAC_MAC_PAUSE_TIME_REG 0x164
+#define XGMAC_MAC_PAUSE_GAP_REG 0x168
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG 0x16C
+#define XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG 0x170
+#define XGMAC_MAC_PAUSE_PEER_MAC_H_REG 0x174
+#define XGMAC_MAC_PAUSE_PEER_MAC_L_REG 0x178
+#define XGMAC_MAC_PFC_PRI_EN_REG 0x17C
+#define XGMAC_MAC_1588_CTRL_REG 0x180
+#define XGMAC_MAC_1588_TX_PORT_DLY_REG 0x184
+#define XGMAC_MAC_1588_RX_PORT_DLY_REG 0x188
+#define XGMAC_MAC_1588_ASYM_DLY_REG 0x18C
+#define XGMAC_MAC_1588_ADJUST_CFG_REG 0x190
+#define XGMAC_MAC_Y1731_ETH_TYPE_REG 0x194
+#define XGMAC_MAC_MIB_CONTROL_REG 0x198
+#define XGMAC_MAC_WAN_RATE_ADJUST_REG 0x19C
+#define XGMAC_MAC_TX_ERR_MARK_REG 0x1A0
+#define XGMAC_MAC_TX_LF_RF_CONTROL_REG 0x1A4
+#define XGMAC_MAC_RX_LF_RF_STATUS_REG 0x1A8
+#define XGMAC_MAC_TX_RUNT_PKT_CNT_REG 0x1C0
+#define XGMAC_MAC_RX_RUNT_PKT_CNT_REG 0x1C4
+#define XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG 0x1C8
+#define XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG 0x1CC
+#define XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG 0x1D0
+#define XGMAC_MAC_RX_ERR_MSG_CNT_REG 0x1D4
+#define XGMAC_MAC_RX_ERR_EFD_CNT_REG 0x1D8
+#define XGMAC_MAC_ERR_INFO_REG 0x1DC
+#define XGMAC_MAC_DBG_INFO_REG 0x1E0
+
+#define XGMAC_PCS_BASER_SYNC_THD_REG 0x330
+#define XGMAC_PCS_STATUS1_REG 0x404
+#define XGMAC_PCS_BASER_STATUS1_REG 0x410
+#define XGMAC_PCS_BASER_STATUS2_REG 0x414
+#define XGMAC_PCS_BASER_SEEDA_0_REG 0x420
+#define XGMAC_PCS_BASER_SEEDA_1_REG 0x424
+#define XGMAC_PCS_BASER_SEEDB_0_REG 0x428
+#define XGMAC_PCS_BASER_SEEDB_1_REG 0x42C
+#define XGMAC_PCS_BASER_TEST_CONTROL_REG 0x430
+#define XGMAC_PCS_BASER_TEST_ERR_CNT_REG 0x434
+#define XGMAC_PCS_DBG_INFO_REG 0x4C0
+#define XGMAC_PCS_DBG_INFO1_REG 0x4C4
+#define XGMAC_PCS_DBG_INFO2_REG 0x4C8
+#define XGMAC_PCS_DBG_INFO3_REG 0x4CC
+
+#define XGMAC_PMA_ENABLE_REG 0x700
+#define XGMAC_PMA_CONTROL_REG 0x704
+#define XGMAC_PMA_SIGNAL_STATUS_REG 0x708
+#define XGMAC_PMA_DBG_INFO_REG 0x70C
+#define XGMAC_PMA_FEC_ABILITY_REG 0x740
+#define XGMAC_PMA_FEC_CONTROL_REG 0x744
+#define XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG 0x750
+#define XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG 0x760
+
+#define XGMAC_TX_PKTS_FRAGMENT 0x0000
+#define XGMAC_TX_PKTS_UNDERSIZE 0x0008
+#define XGMAC_TX_PKTS_UNDERMIN 0x0010
+#define XGMAC_TX_PKTS_64OCTETS 0x0018
+#define XGMAC_TX_PKTS_65TO127OCTETS 0x0020
+#define XGMAC_TX_PKTS_128TO255OCTETS 0x0028
+#define XGMAC_TX_PKTS_256TO511OCTETS 0x0030
+#define XGMAC_TX_PKTS_512TO1023OCTETS 0x0038
+#define XGMAC_TX_PKTS_1024TO1518OCTETS 0x0040
+#define XGMAC_TX_PKTS_1519TOMAXOCTETS 0x0048
+#define XGMAC_TX_PKTS_1519TOMAXOCTETSOK 0x0050
+#define XGMAC_TX_PKTS_OVERSIZE 0x0058
+#define XGMAC_TX_PKTS_JABBER 0x0060
+#define XGMAC_TX_GOODPKTS 0x0068
+#define XGMAC_TX_GOODOCTETS 0x0070
+#define XGMAC_TX_TOTAL_PKTS 0x0078
+#define XGMAC_TX_TOTALOCTETS 0x0080
+#define XGMAC_TX_UNICASTPKTS 0x0088
+#define XGMAC_TX_MULTICASTPKTS 0x0090
+#define XGMAC_TX_BROADCASTPKTS 0x0098
+#define XGMAC_TX_PRI0PAUSEPKTS 0x00a0
+#define XGMAC_TX_PRI1PAUSEPKTS 0x00a8
+#define XGMAC_TX_PRI2PAUSEPKTS 0x00b0
+#define XGMAC_TX_PRI3PAUSEPKTS 0x00b8
+#define XGMAC_TX_PRI4PAUSEPKTS 0x00c0
+#define XGMAC_TX_PRI5PAUSEPKTS 0x00c8
+#define XGMAC_TX_PRI6PAUSEPKTS 0x00d0
+#define XGMAC_TX_PRI7PAUSEPKTS 0x00d8
+#define XGMAC_TX_MACCTRLPKTS 0x00e0
+#define XGMAC_TX_1731PKTS 0x00e8
+#define XGMAC_TX_1588PKTS 0x00f0
+#define XGMAC_RX_FROMAPPGOODPKTS 0x00f8
+#define XGMAC_RX_FROMAPPBADPKTS 0x0100
+#define XGMAC_TX_ERRALLPKTS 0x0108
+
+#define XGMAC_RX_PKTS_FRAGMENT 0x0110
+#define XGMAC_RX_PKTSUNDERSIZE 0x0118
+#define XGMAC_RX_PKTS_UNDERMIN 0x0120
+#define XGMAC_RX_PKTS_64OCTETS 0x0128
+#define XGMAC_RX_PKTS_65TO127OCTETS 0x0130
+#define XGMAC_RX_PKTS_128TO255OCTETS 0x0138
+#define XGMAC_RX_PKTS_256TO511OCTETS 0x0140
+#define XGMAC_RX_PKTS_512TO1023OCTETS 0x0148
+#define XGMAC_RX_PKTS_1024TO1518OCTETS 0x0150
+#define XGMAC_RX_PKTS_1519TOMAXOCTETS 0x0158
+#define XGMAC_RX_PKTS_1519TOMAXOCTETSOK 0x0160
+#define XGMAC_RX_PKTS_OVERSIZE 0x0168
+#define XGMAC_RX_PKTS_JABBER 0x0170
+#define XGMAC_RX_GOODPKTS 0x0178
+#define XGMAC_RX_GOODOCTETS 0x0180
+#define XGMAC_RX_TOTAL_PKTS 0x0188
+#define XGMAC_RX_TOTALOCTETS 0x0190
+#define XGMAC_RX_UNICASTPKTS 0x0198
+#define XGMAC_RX_MULTICASTPKTS 0x01a0
+#define XGMAC_RX_BROADCASTPKTS 0x01a8
+#define XGMAC_RX_PRI0PAUSEPKTS 0x01b0
+#define XGMAC_RX_PRI1PAUSEPKTS 0x01b8
+#define XGMAC_RX_PRI2PAUSEPKTS 0x01c0
+#define XGMAC_RX_PRI3PAUSEPKTS 0x01c8
+#define XGMAC_RX_PRI4PAUSEPKTS 0x01d0
+#define XGMAC_RX_PRI5PAUSEPKTS 0x01d8
+#define XGMAC_RX_PRI6PAUSEPKTS 0x01e0
+#define XGMAC_RX_PRI7PAUSEPKTS 0x01e8
+#define XGMAC_RX_MACCTRLPKTS 0x01f0
+#define XGMAC_TX_SENDAPPGOODPKTS 0x01f8
+#define XGMAC_TX_SENDAPPBADPKTS 0x0200
+#define XGMAC_RX_1731PKTS 0x0208
+#define XGMAC_RX_SYMBOLERRPKTS 0x0210
+#define XGMAC_RX_FCSERRPKTS 0x0218
+
+#define DSAF_SRAM_INIT_OVER_M 0xff
+#define DSAFV2_SRAM_INIT_OVER_M 0x3ff
+#define DSAF_SRAM_INIT_OVER_S 0
+
+#define DSAF_CFG_EN_S 0
+#define DSAF_CFG_TC_MODE_S 1
+#define DSAF_CFG_CRC_EN_S 2
+#define DSAF_CFG_SBM_INIT_S 3
+#define DSAF_CFG_MIX_MODE_S 4
+#define DSAF_CFG_STP_MODE_S 5
+#define DSAF_CFG_LOCA_ADDR_EN_S 6
+#define DSAFV2_CFG_VLAN_TAG_MODE_S 17
+
+#define DSAF_CNT_CLR_CE_S 0
+#define DSAF_SNAP_EN_S 1
+
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_XGE 41
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_1000 410
+#define HNS_DSAF_PFC_UNIT_CNT_FOR_GE_2500 103
+
+#define DSAF_PFC_UNINT_CNT_M ((1ULL << 9) - 1)
+#define DSAF_PFC_UNINT_CNT_S 0
+
+#define DSAF_MAC_PAUSE_RX_EN_B 2
+#define DSAF_PFC_PAUSE_RX_EN_B 1
+#define DSAF_PFC_PAUSE_TX_EN_B 0
+
+#define DSAF_PPE_QID_CFG_M 0xFF
+#define DSAF_PPE_QID_CFG_S 0
+
+#define DSAF_SW_PORT_TYPE_M 3
+#define DSAF_SW_PORT_TYPE_S 0
+
+#define DSAF_STP_PORT_TYPE_M 7
+#define DSAF_STP_PORT_TYPE_S 0
+
+#define DSAF_INODE_IN_PORT_NUM_M 7
+#define DSAF_INODE_IN_PORT_NUM_S 0
+#define DSAFV2_INODE_IN_PORT1_NUM_M (7ULL << 3)
+#define DSAFV2_INODE_IN_PORT1_NUM_S 3
+#define DSAFV2_INODE_IN_PORT2_NUM_M (7ULL << 6)
+#define DSAFV2_INODE_IN_PORT2_NUM_S 6
+#define DSAFV2_INODE_IN_PORT3_NUM_M (7ULL << 9)
+#define DSAFV2_INODE_IN_PORT3_NUM_S 9
+#define DSAFV2_INODE_IN_PORT4_NUM_M (7ULL << 12)
+#define DSAFV2_INODE_IN_PORT4_NUM_S 12
+#define DSAFV2_INODE_IN_PORT5_NUM_M (7ULL << 15)
+#define DSAFV2_INODE_IN_PORT5_NUM_S 15
+
+#define HNS_DSAF_I4TC_CFG 0x18688688
+#define HNS_DSAF_I8TC_CFG 0x18FAC688
+
+#define DSAF_SBM_CFG_SHCUT_EN_S 0
+#define DSAF_SBM_CFG_EN_S 1
+#define DSAF_SBM_CFG_MIB_EN_S 2
+#define DSAF_SBM_CFG_ECC_INVERT_EN_S 3
+
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_S 20
+#define DSAF_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 11) - 1) << 20)
+
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAF_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_S 10
+#define DSAF_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAF_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_S 10
+#define DSAF_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAF_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 0)
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 10
+#define DSAF_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 10) - 1) << 10)
+
+#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG0_VC1_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_S 9
+#define DSAFV2_SBM_CFG0_VC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9)
+#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_S 18
+#define DSAFV2_SBM_CFG0_COM_MAX_BUF_NUM_M (((1ULL << 10) - 1) << 18)
+
+#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG1_TC4_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_S 9
+#define DSAFV2_SBM_CFG1_TC0_MAX_BUF_NUM_M (((1ULL << 9) - 1) << 9)
+
+#define DSAFV2_SBM_CFG2_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_SET_BUF_NUM_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_S 9
+#define DSAFV2_SBM_CFG2_RESET_BUF_NUM_M (((1ULL << 9) - 1) << 9)
+
+#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_S 0
+#define DSAFV2_SBM_CFG3_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_S 9
+#define DSAFV2_SBM_CFG3_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+
+#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_S 0
+#define DSAFV2_SBM_CFG4_SET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 0)
+#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_S 9
+#define DSAFV2_SBM_CFG4_RESET_BUF_NUM_NO_PFC_M (((1ULL << 9) - 1) << 9)
+
+#define DSAF_CHNS_MASK 0x3f000
+#define DSAF_SBM_ROCEE_CFG_CRD_EN_B 2
+#define SRST_TIME_INTERVAL 20
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_S 0
+#define DSAFV2_SBM_CFG2_ROCEE_SET_BUF_NUM_M (((1ULL << 8) - 1) << 0)
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_S 8
+#define DSAFV2_SBM_CFG2_ROCEE_RESET_BUF_NUM_M (((1ULL << 8) - 1) << 8)
+
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_S (0)
+#define DSAFV2_SBM_CFG2_PPE_SET_BUF_NUM_M (((1ULL << 6) - 1) << 0)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_S (6)
+#define DSAFV2_SBM_CFG2_PPE_RESET_BUF_NUM_M (((1ULL << 6) - 1) << 6)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_S (12)
+#define DSAFV2_SBM_CFG2_PPE_CFG_USEFUL_NUM_M (((1ULL << 6) - 1) << 12)
+
+#define DSAF_TBL_TCAM_ADDR_S 0
+#define DSAF_TBL_TCAM_ADDR_M ((1ULL << 9) - 1)
+
+#define DSAF_TBL_LINE_ADDR_S 0
+#define DSAF_TBL_LINE_ADDR_M ((1ULL << 15) - 1)
+
+#define DSAF_TBL_MCAST_CFG4_VM128_112_S 0
+#define DSAF_TBL_MCAST_CFG4_VM128_112_M (((1ULL << 7) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG4_ITEM_VLD_S 7
+#define DSAF_TBL_MCAST_CFG4_OLD_EN_S 8
+
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_S 0
+#define DSAF_TBL_MCAST_CFG0_XGE5_0_M (((1ULL << 6) - 1) << 0)
+#define DSAF_TBL_MCAST_CFG0_VM25_0_S 6
+#define DSAF_TBL_MCAST_CFG0_VM25_0_M (((1ULL << 26) - 1) << 6)
+
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_S 0
+#define DSAF_TBL_UCAST_CFG1_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_UCAST_CFG1_DVC_S 8
+#define DSAF_TBL_UCAST_CFG1_MAC_DISCARD_S 9
+#define DSAF_TBL_UCAST_CFG1_ITEM_VLD_S 10
+#define DSAF_TBL_UCAST_CFG1_OLD_EN_S 11
+
+#define DSAF_TBL_LINE_CFG_OUT_PORT_S 0
+#define DSAF_TBL_LINE_CFG_OUT_PORT_M (((1ULL << 8) - 1) << 0)
+#define DSAF_TBL_LINE_CFG_DVC_S 8
+#define DSAF_TBL_LINE_CFG_MAC_DISCARD_S 9
+
+#define DSAF_TBL_PUL_OLD_RSLT_RE_S 0
+#define DSAF_TBL_PUL_MCAST_VLD_S 1
+#define DSAF_TBL_PUL_TCAM_DATA_VLD_S 2
+#define DSAF_TBL_PUL_UCAST_VLD_S 3
+#define DSAF_TBL_PUL_LINE_VLD_S 4
+#define DSAF_TBL_PUL_TCAM_LOAD_S 5
+#define DSAF_TBL_PUL_LINE_LOAD_S 6
+
+#define DSAF_TBL_DFX_LINE_LKUP_NUM_EN_S 0
+#define DSAF_TBL_DFX_UC_LKUP_NUM_EN_S 1
+#define DSAF_TBL_DFX_MC_LKUP_NUM_EN_S 2
+#define DSAF_TBL_DFX_BC_LKUP_NUM_EN_S 3
+#define DSAF_TBL_DFX_RAM_ERR_INJECT_EN_S 4
+
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_S 0
+#define DSAF_VOQ_BP_ALL_DOWNTHRD_M (((1ULL << 10) - 1) << 0)
+#define DSAF_VOQ_BP_ALL_UPTHRD_S 10
+#define DSAF_VOQ_BP_ALL_UPTHRD_M (((1ULL << 10) - 1) << 10)
+
+#define DSAF_XGE_GE_WORK_MODE_S 0
+#define DSAF_XGE_GE_LOOPBACK_S 1
+
+#define DSAF_FC_XGE_TX_PAUSE_S 0
+#define DSAF_REGS_XGE_CNT_CAR_S 1
+
+#define PPE_CFG_QID_MODE_DEF_QID_S 0
+#define PPE_CFG_QID_MODE_DEF_QID_M (0xff << PPE_CFG_QID_MODE_DEF_QID_S)
+
+#define PPE_CFG_QID_MODE_CF_QID_MODE_S 8
+#define PPE_CFG_QID_MODE_CF_QID_MODE_M (0x7 << PPE_CFG_QID_MODE_CF_QID_MODE_S)
+
+#define PPEV2_CFG_RSS_TBL_4N0_S 0
+#define PPEV2_CFG_RSS_TBL_4N0_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N0_S)
+
+#define PPEV2_CFG_RSS_TBL_4N1_S 8
+#define PPEV2_CFG_RSS_TBL_4N1_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N1_S)
+
+#define PPEV2_CFG_RSS_TBL_4N2_S 16
+#define PPEV2_CFG_RSS_TBL_4N2_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N2_S)
+
+#define PPEV2_CFG_RSS_TBL_4N3_S 24
+#define PPEV2_CFG_RSS_TBL_4N3_M (((1UL << 5) - 1) << PPEV2_CFG_RSS_TBL_4N3_S)
+
+#define DSAFV2_SERDES_LBK_EN_B 8
+#define DSAFV2_SERDES_LBK_QID_S 0
+#define DSAFV2_SERDES_LBK_QID_M (((1UL << 8) - 1) << DSAFV2_SERDES_LBK_QID_S)
+
+#define PPE_CNT_CLR_CE_B 0
+#define PPE_CNT_CLR_SNAP_EN_B 1
+
+#define PPE_INT_GAPTIME_B 0
+#define PPE_INT_GAPTIME_M 0x3ff
+
+#define PPE_COMMON_CNT_CLR_CE_B 0
+#define PPE_COMMON_CNT_CLR_SNAP_EN_B 1
+#define RCB_COM_TSO_MODE_B 0
+#define RCB_COM_CFG_FNA_B 1
+#define RCB_COM_CFG_FA_B 0
+
+#define GMAC_DUPLEX_TYPE_B 0
+
+#define GMAC_TX_WATER_LINE_MASK ((1UL << 8) - 1)
+#define GMAC_TX_WATER_LINE_SHIFT 0
+
+#define GMAC_FC_TX_TIMER_S 0
+#define GMAC_FC_TX_TIMER_M 0xffff
+
+#define GMAC_MAX_FRM_SIZE_S 0
+#define GMAC_MAX_FRM_SIZE_M 0xffff
+
+#define GMAC_PORT_MODE_S 0
+#define GMAC_PORT_MODE_M 0xf
+
+#define GMAC_RGMII_1000M_DELAY_B 4
+#define GMAC_MII_TX_EDGE_SEL_B 5
+#define GMAC_FIFO_ERR_AUTO_RST_B 6
+#define GMAC_DBG_CLK_LOS_MSK_B 7
+
+#define GMAC_PORT_RX_EN_B 1
+#define GMAC_PORT_TX_EN_B 2
+
+#define GMAC_PAUSE_EN_RX_FDFC_B 0
+#define GMAC_PAUSE_EN_TX_FDFC_B 1
+#define GMAC_PAUSE_EN_TX_HDFC_B 2
+
+#define GMAC_SHORT_RUNTS_THR_S 0
+#define GMAC_SHORT_RUNTS_THR_M 0x1f
+
+#define GMAC_AN_NEG_STAT_FD_B 5
+#define GMAC_AN_NEG_STAT_HD_B 6
+#define GMAC_AN_NEG_STAT_RF1_DUPLIEX_B 12
+#define GMAC_AN_NEG_STAT_RF2_B 13
+
+#define GMAC_AN_NEG_STAT_NP_LNK_OK_B 15
+#define GMAC_AN_NEG_STAT_RX_SYNC_OK_B 20
+#define GMAC_AN_NEG_STAT_AN_DONE_B 21
+
+#define GMAC_AN_NEG_STAT_PS_S 7
+#define GMAC_AN_NEG_STAT_PS_M (0x3 << GMAC_AN_NEG_STAT_PS_S)
+
+#define GMAC_AN_NEG_STAT_SPEED_S 10
+#define GMAC_AN_NEG_STAT_SPEED_M (0x3 << GMAC_AN_NEG_STAT_SPEED_S)
+
+#define GMAC_TX_AN_EN_B 5
+#define GMAC_TX_CRC_ADD_B 6
+#define GMAC_TX_PAD_EN_B 7
+
+#define GMAC_LINE_LOOPBACK_B 0
+
+#define GMAC_LP_REG_CF_EXT_DRV_LP_B 1
+#define GMAC_LP_REG_CF2MI_LP_EN_B 2
+
+#define GMAC_MODE_CHANGE_EB_B 0
+#define GMAC_UC_MATCH_EN_B 0
+#define GMAC_ADDR_EN_B 16
+
+#define GMAC_RECV_CTRL_STRIP_PAD_EN_B 3
+#define GMAC_RECV_CTRL_RUNT_PKT_EN_B 4
+
+#define GMAC_TX_LOOP_PKT_HIG_PRI_B 0
+#define GMAC_TX_LOOP_PKT_EN_B 1
+
+#define XGMAC_PORT_MODE_TX_S 0x0
+#define XGMAC_PORT_MODE_TX_M (0x3 << XGMAC_PORT_MODE_TX_S)
+#define XGMAC_PORT_MODE_TX_40G_B 0x3
+#define XGMAC_PORT_MODE_RX_S 0x4
+#define XGMAC_PORT_MODE_RX_M (0x3 << XGMAC_PORT_MODE_RX_S)
+#define XGMAC_PORT_MODE_RX_40G_B 0x7
+
+#define XGMAC_ENABLE_TX_B 0
+#define XGMAC_ENABLE_RX_B 1
+
+#define XGMAC_UNIDIR_EN_B 0
+#define XGMAC_RF_TX_EN_B 1
+#define XGMAC_LF_RF_INSERT_S 2
+#define XGMAC_LF_RF_INSERT_M (0x3 << XGMAC_LF_RF_INSERT_S)
+
+#define XGMAC_CTL_TX_FCS_B 0
+#define XGMAC_CTL_TX_PAD_B 1
+#define XGMAC_CTL_TX_PREAMBLE_TRANS_B 3
+#define XGMAC_CTL_TX_UNDER_MIN_ERR_B 4
+#define XGMAC_CTL_TX_TRUNCATE_B 5
+#define XGMAC_CTL_TX_1588_B 8
+#define XGMAC_CTL_TX_1731_B 9
+#define XGMAC_CTL_TX_PFC_B 10
+#define XGMAC_CTL_RX_FCS_B 16
+#define XGMAC_CTL_RX_FCS_STRIP_B 17
+#define XGMAC_CTL_RX_PREAMBLE_TRANS_B 19
+#define XGMAC_CTL_RX_UNDER_MIN_ERR_B 20
+#define XGMAC_CTL_RX_TRUNCATE_B 21
+#define XGMAC_CTL_RX_1588_B 24
+#define XGMAC_CTL_RX_1731_B 25
+#define XGMAC_CTL_RX_PFC_B 26
+
+#define XGMAC_PMA_FEC_CTL_TX_B 0
+#define XGMAC_PMA_FEC_CTL_RX_B 1
+#define XGMAC_PMA_FEC_CTL_ERR_EN 2
+#define XGMAC_PMA_FEC_CTL_ERR_SH 3
+
+#define XGMAC_PAUSE_CTL_TX_B 0
+#define XGMAC_PAUSE_CTL_RX_B 1
+#define XGMAC_PAUSE_CTL_RSP_MODE_B 2
+#define XGMAC_PAUSE_CTL_TX_XOFF_B 3
+
+static inline void dsaf_write_reg(u8 __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+#define dsaf_write_dev(a, reg, value) \
+ dsaf_write_reg((a)->io_base, (reg), (value))
+
+static inline u32 dsaf_read_reg(u8 __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void dsaf_write_syscon(struct regmap *base, u32 reg, u32 value)
+{
+ regmap_write(base, reg, value);
+}
+
+static inline int dsaf_read_syscon(struct regmap *base, u32 reg, u32 *val)
+{
+ return regmap_read(base, reg, val);
+}
+
+#define dsaf_read_dev(a, reg) \
+ dsaf_read_reg((a)->io_base, (reg))
+
+#define dsaf_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= (((val) << (shift)) & (mask)); \
+ } while (0)
+
+#define dsaf_set_bit(origin, shift, val) \
+ dsaf_set_field((origin), (1ull << (shift)), (shift), (val))
+
+static inline void dsaf_set_reg_field(u8 __iomem *base, u32 reg, u32 mask,
+ u32 shift, u32 val)
+{
+ u32 origin = dsaf_read_reg(base, reg);
+
+ dsaf_set_field(origin, mask, shift, val);
+ dsaf_write_reg(base, reg, origin);
+}
+
+#define dsaf_set_dev_field(dev, reg, mask, shift, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (mask), (shift), (val))
+
+#define dsaf_set_dev_bit(dev, reg, bit, val) \
+ dsaf_set_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit), (val))
+
+#define dsaf_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define dsaf_get_bit(origin, shift) \
+ dsaf_get_field((origin), (1ull << (shift)), (shift))
+
+static inline u32 dsaf_get_reg_field(u8 __iomem *base, u32 reg, u32 mask,
+ u32 shift)
+{
+ u32 origin;
+
+ origin = dsaf_read_reg(base, reg);
+ return dsaf_get_field(origin, mask, shift);
+}
+
+#define dsaf_get_dev_field(dev, reg, mask, shift) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (mask), (shift))
+
+#define dsaf_get_dev_bit(dev, reg, bit) \
+ dsaf_get_reg_field((dev)->io_base, (reg), (1ull << (bit)), (bit))
+
+#define dsaf_write_b(addr, data)\
+ writeb((data), (__iomem u8 *)(addr))
+#define dsaf_read_b(addr)\
+ readb((__iomem u8 *)(addr))
+
+#define hns_mac_reg_read64(drv, offset) \
+ readq((__iomem void *)(((drv)->io_base + 0xc00 + (offset))))
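+
+/*
+ * Illustrative usage sketch (editor's note, not part of the original
+ * driver): the helpers above layer mask/shift accessors on top of plain
+ * MMIO reads/writes, so a single bit of a device register can be toggled
+ * and read back like this ("drv" is assumed to be a struct mac_driver
+ * with a mapped io_base):
+ *
+ *	dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, 1);
+ *	tx_on = dsaf_get_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B);
+ */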
+
+#endif /* _DSAF_REG_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
new file mode 100644
index 000000000..7e3609ce1
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.c
@@ -0,0 +1,837 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/io-64-nonatomic-hi-lo.h>
+#include <linux/of_mdio.h>
+#include "hns_dsaf_main.h"
+#include "hns_dsaf_mac.h"
+#include "hns_dsaf_xgmac.h"
+#include "hns_dsaf_reg.h"
+
+static const struct mac_stats_string g_xgmac_stats_string[] = {
+ {"xgmac_tx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(tx_fragment_err)},
+ {"xgmac_tx_good_pkts_minto64", MAC_STATS_FIELD_OFF(tx_undersize)},
+ {"xgmac_tx_total_pkts_minto64", MAC_STATS_FIELD_OFF(tx_under_min_pkts)},
+ {"xgmac_tx_pkts_64", MAC_STATS_FIELD_OFF(tx_64bytes)},
+ {"xgmac_tx_pkts_65to127", MAC_STATS_FIELD_OFF(tx_65to127)},
+ {"xgmac_tx_pkts_128to255", MAC_STATS_FIELD_OFF(tx_128to255)},
+ {"xgmac_tx_pkts_256to511", MAC_STATS_FIELD_OFF(tx_256to511)},
+ {"xgmac_tx_pkts_512to1023", MAC_STATS_FIELD_OFF(tx_512to1023)},
+ {"xgmac_tx_pkts_1024to1518", MAC_STATS_FIELD_OFF(tx_1024to1518)},
+ {"xgmac_tx_pkts_1519tomax", MAC_STATS_FIELD_OFF(tx_1519tomax)},
+ {"xgmac_tx_good_pkts_1519tomax",
+ MAC_STATS_FIELD_OFF(tx_1519tomax_good)},
+ {"xgmac_tx_good_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_oversize)},
+ {"xgmac_tx_bad_pkts_untralmax", MAC_STATS_FIELD_OFF(tx_jabber_err)},
+ {"xgmac_tx_good_pkts_all", MAC_STATS_FIELD_OFF(tx_good_pkts)},
+ {"xgmac_tx_good_byte_all", MAC_STATS_FIELD_OFF(tx_good_bytes)},
+ {"xgmac_tx_total_pkt", MAC_STATS_FIELD_OFF(tx_total_pkts)},
+ {"xgmac_tx_total_byt", MAC_STATS_FIELD_OFF(tx_total_bytes)},
+ {"xgmac_tx_uc_pkt", MAC_STATS_FIELD_OFF(tx_uc_pkts)},
+ {"xgmac_tx_mc_pkt", MAC_STATS_FIELD_OFF(tx_mc_pkts)},
+ {"xgmac_tx_bc_pkt", MAC_STATS_FIELD_OFF(tx_bc_pkts)},
+ {"xgmac_tx_pause_frame_num", MAC_STATS_FIELD_OFF(tx_pfc_tc0)},
+ {"xgmac_tx_pfc_per_1pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc1)},
+ {"xgmac_tx_pfc_per_2pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc2)},
+ {"xgmac_tx_pfc_per_3pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc3)},
+ {"xgmac_tx_pfc_per_4pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc4)},
+ {"xgmac_tx_pfc_per_5pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc5)},
+ {"xgmac_tx_pfc_per_6pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc6)},
+ {"xgmac_tx_pfc_per_7pause_framer", MAC_STATS_FIELD_OFF(tx_pfc_tc7)},
+ {"xgmac_tx_mac_ctrol_frame", MAC_STATS_FIELD_OFF(tx_ctrl)},
+ {"xgmac_tx_1731_pkts", MAC_STATS_FIELD_OFF(tx_1731_pkts)},
+ {"xgmac_tx_1588_pkts", MAC_STATS_FIELD_OFF(tx_1588_pkts)},
+ {"xgmac_rx_good_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_good_from_sw)},
+ {"xgmac_rx_bad_pkt_from_dsaf", MAC_STATS_FIELD_OFF(rx_bad_from_sw)},
+ {"xgmac_tx_bad_pkt_64tomax", MAC_STATS_FIELD_OFF(tx_bad_pkts)},
+
+ {"xgmac_rx_bad_pkts_minto64", MAC_STATS_FIELD_OFF(rx_fragment_err)},
+ {"xgmac_rx_good_pkts_minto64", MAC_STATS_FIELD_OFF(rx_undersize)},
+ {"xgmac_rx_total_pkts_minto64", MAC_STATS_FIELD_OFF(rx_under_min)},
+ {"xgmac_rx_pkt_64", MAC_STATS_FIELD_OFF(rx_64bytes)},
+ {"xgmac_rx_pkt_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
+ {"xgmac_rx_pkt_128to255", MAC_STATS_FIELD_OFF(rx_128to255)},
+ {"xgmac_rx_pkt_256to511", MAC_STATS_FIELD_OFF(rx_256to511)},
+ {"xgmac_rx_pkt_512to1023", MAC_STATS_FIELD_OFF(rx_512to1023)},
+ {"xgmac_rx_pkt_1024to1518", MAC_STATS_FIELD_OFF(rx_1024to1518)},
+ {"xgmac_rx_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax)},
+ {"xgmac_rx_good_pkt_1519tomax", MAC_STATS_FIELD_OFF(rx_1519tomax_good)},
+ {"xgmac_rx_good_pkt_untramax", MAC_STATS_FIELD_OFF(rx_oversize)},
+ {"xgmac_rx_bad_pkt_untramax", MAC_STATS_FIELD_OFF(rx_jabber_err)},
+ {"xgmac_rx_good_pkt", MAC_STATS_FIELD_OFF(rx_good_pkts)},
+ {"xgmac_rx_good_byt", MAC_STATS_FIELD_OFF(rx_good_bytes)},
+ {"xgmac_rx_pkt", MAC_STATS_FIELD_OFF(rx_total_pkts)},
+ {"xgmac_rx_byt", MAC_STATS_FIELD_OFF(rx_total_bytes)},
+ {"xgmac_rx_uc_pkt", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
+ {"xgmac_rx_mc_pkt", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"xgmac_rx_bc_pkt", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
+ {"xgmac_rx_pause_frame_num", MAC_STATS_FIELD_OFF(rx_pfc_tc0)},
+ {"xgmac_rx_pfc_per_1pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc1)},
+ {"xgmac_rx_pfc_per_2pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc2)},
+ {"xgmac_rx_pfc_per_3pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc3)},
+ {"xgmac_rx_pfc_per_4pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc4)},
+ {"xgmac_rx_pfc_per_5pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc5)},
+ {"xgmac_rx_pfc_per_6pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc6)},
+ {"xgmac_rx_pfc_per_7pause_frame", MAC_STATS_FIELD_OFF(rx_pfc_tc7)},
+ {"xgmac_rx_mac_control", MAC_STATS_FIELD_OFF(rx_unknown_ctrl)},
+ {"xgmac_tx_good_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_good_to_sw)},
+ {"xgmac_tx_bad_pkt_todsaf", MAC_STATS_FIELD_OFF(tx_bad_to_sw)},
+ {"xgmac_rx_1731_pkt", MAC_STATS_FIELD_OFF(rx_1731_pkts)},
+ {"xgmac_rx_symbol_err_pkt", MAC_STATS_FIELD_OFF(rx_symbol_err)},
+ {"xgmac_rx_fcs_pkt", MAC_STATS_FIELD_OFF(rx_fcs_err)}
+};
+
+/**
+ *hns_xgmac_tx_enable - xgmac port tx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_tx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_TX_B, !!value);
+}
+
+/**
+ *hns_xgmac_rx_enable - xgmac port rx enable
+ *@drv: mac driver
+ *@value: value of enable
+ */
+static void hns_xgmac_rx_enable(struct mac_driver *drv, u32 value)
+{
+ dsaf_set_dev_bit(drv, XGMAC_MAC_ENABLE_REG, XGMAC_ENABLE_RX_B, !!value);
+}
+
+/**
+ * hns_xgmac_lf_rf_insert - insert lf rf control about xgmac
+ * @mac_drv: mac driver
+ * @mode: insert rf or lf
+ */
+static void hns_xgmac_lf_rf_insert(struct mac_driver *mac_drv, u32 mode)
+{
+ dsaf_set_dev_field(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG,
+ XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, mode);
+}
+
+/**
+ * hns_xgmac_lf_rf_control_init - initialize the lf rf control register
+ * @mac_drv: mac driver
+ */
+static void hns_xgmac_lf_rf_control_init(struct mac_driver *mac_drv)
+{
+ u32 val = 0;
+
+ dsaf_set_bit(val, XGMAC_UNIDIR_EN_B, 0);
+ dsaf_set_bit(val, XGMAC_RF_TX_EN_B, 1);
+ dsaf_set_field(val, XGMAC_LF_RF_INSERT_M, XGMAC_LF_RF_INSERT_S, 0);
+ dsaf_write_dev(mac_drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG, val);
+}
+
+/**
+ *hns_xgmac_enable - enable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_enable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_NO_LF_RF_INSERT);
+
+ /* enable XGE rx/tx */
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 1);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 1);
+ hns_xgmac_rx_enable(drv, 1);
+ } else {
+ dev_err(drv->dev, "error mac mode:%d\n", mode);
+ }
+}
+
+/**
+ *hns_xgmac_disable - disable xgmac port
+ *@mac_drv: mac driver
+ *@mode: mode of mac port
+ */
+static void hns_xgmac_disable(void *mac_drv, enum mac_commom_mode mode)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ if (mode == MAC_COMM_MODE_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX) {
+ hns_xgmac_rx_enable(drv, 0);
+ } else if (mode == MAC_COMM_MODE_RX_AND_TX) {
+ hns_xgmac_tx_enable(drv, 0);
+ hns_xgmac_rx_enable(drv, 0);
+ }
+ hns_xgmac_lf_rf_insert(drv, HNS_XGMAC_LF_INSERT);
+}
+
+/**
+ *hns_xgmac_pma_fec_enable - xgmac PMA FEC enable
+ *@drv: mac driver
+ *@tx_value: tx FEC enable
+ *@rx_value: rx FEC enable
+ */
+static void hns_xgmac_pma_fec_enable(struct mac_driver *drv, u32 tx_value,
+ u32 rx_value)
+{
+ u32 origin = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_TX_B, !!tx_value);
+ dsaf_set_bit(origin, XGMAC_PMA_FEC_CTL_RX_B, !!rx_value);
+ dsaf_write_dev(drv, XGMAC_PMA_FEC_CONTROL_REG, origin);
+}
+
+/* clear pending exception irqs and enable/disable them for xge */
+static void hns_xgmac_exc_irq_en(struct mac_driver *drv, u32 en)
+{
+ u32 clr_value = 0xfffffffful;
+ u32 msk_value = en ? 0xfffffffful : 0; /* 1 is enable, 0 is disable */
+
+ dsaf_write_dev(drv, XGMAC_INT_STATUS_REG, clr_value);
+ dsaf_write_dev(drv, XGMAC_INT_ENABLE_REG, msk_value);
+}
+
+/**
+ *hns_xgmac_init - initialize XGE
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_init(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+ u32 port = drv->mac_id;
+
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 0);
+ msleep(100);
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, port, 1);
+
+ msleep(100);
+ hns_xgmac_lf_rf_control_init(drv);
+ hns_xgmac_exc_irq_en(drv, 0);
+
+ hns_xgmac_pma_fec_enable(drv, 0x0, 0x0);
+
+ hns_xgmac_disable(mac_drv, MAC_COMM_MODE_RX_AND_TX);
+}
+
+/**
+ *hns_xgmac_config_pad_and_crc - set xgmac pad and crc enable at the same time
+ *@mac_drv: mac driver
+ *@newval:enable of pad and crc
+ */
+static void hns_xgmac_config_pad_and_crc(void *mac_drv, u8 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+
+ dsaf_set_bit(origin, XGMAC_CTL_TX_PAD_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_TX_FCS_B, !!newval);
+ dsaf_set_bit(origin, XGMAC_CTL_RX_FCS_B, !!newval);
+ dsaf_write_dev(drv, XGMAC_MAC_CONTROL_REG, origin);
+}
+
+/**
+ *hns_xgmac_pausefrm_cfg - set pause param about xgmac
+ *@mac_drv: mac driver
+ *@rx_en: enable receive
+ *@tx_en: enable transmit
+ */
+static void hns_xgmac_pausefrm_cfg(void *mac_drv, u32 rx_en, u32 tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 origin = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_TX_B, !!tx_en);
+ dsaf_set_bit(origin, XGMAC_PAUSE_CTL_RX_B, !!rx_en);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG, origin);
+}
+
+static void hns_xgmac_set_pausefrm_mac_addr(void *mac_drv, char *mac_addr)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ u32 high_val = mac_addr[1] | (mac_addr[0] << 8);
+ u32 low_val = mac_addr[5] | (mac_addr[4] << 8)
+ | (mac_addr[3] << 16) | (mac_addr[2] << 24);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG, low_val);
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG, high_val);
+}
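+
+/*
+ * Worked example (editor's note, not part of the original driver): for a
+ * pause source address of 00:11:22:33:44:55 the function above writes
+ * low_val = 0x22334455 (bytes 2-5) and high_val = 0x0011 (bytes 0-1),
+ * i.e. the address is split across the two pause LOCAL_MAC registers.
+ */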
+
+/**
+ *hns_xgmac_set_rx_ignore_pause_frames - set rx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:enable rx pause param
+ */
+static void hns_xgmac_set_rx_ignore_pause_frames(void *mac_drv, u32 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_RX_B, !!enable);
+}
+
+/**
+ *hns_xgmac_set_tx_auto_pause_frames - set tx pause param about xgmac
+ *@mac_drv: mac driver
+ *@enable:enable tx pause param
+ */
+static void hns_xgmac_set_tx_auto_pause_frames(void *mac_drv, u16 enable)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_set_dev_bit(drv, XGMAC_MAC_PAUSE_CTRL_REG,
+ XGMAC_PAUSE_CTL_TX_B, !!enable);
+
+ /* if enable is not zero, set tx pause time */
+ if (enable)
+ dsaf_write_dev(drv, XGMAC_MAC_PAUSE_TIME_REG, enable);
+}
+
+/**
+ *hns_xgmac_config_max_frame_length - set xgmac max frame length
+ *@mac_drv: mac driver
+ *@newval:xgmac max frame length
+ */
+static void hns_xgmac_config_max_frame_length(void *mac_drv, u16 newval)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ dsaf_write_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG, newval);
+}
+
+static void hns_xgmac_update_stats(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = &drv->mac_cb->hw_stats;
+
+ /* TX */
+ hw_stats->tx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hw_stats->tx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hw_stats->tx_under_min_pkts
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hw_stats->tx_64bytes = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hw_stats->tx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hw_stats->tx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hw_stats->tx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hw_stats->tx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hw_stats->tx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hw_stats->tx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hw_stats->tx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->tx_oversize = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hw_stats->tx_jabber_err = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hw_stats->tx_good_pkts = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hw_stats->tx_good_bytes = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hw_stats->tx_total_pkts = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hw_stats->tx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hw_stats->tx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hw_stats->tx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hw_stats->tx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hw_stats->tx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hw_stats->tx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hw_stats->tx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hw_stats->tx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hw_stats->tx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hw_stats->tx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hw_stats->tx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hw_stats->tx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hw_stats->tx_ctrl = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hw_stats->tx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hw_stats->tx_1588_pkts = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hw_stats->rx_good_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hw_stats->rx_bad_from_sw
+ = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hw_stats->tx_bad_pkts = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+
+ /* RX */
+ hw_stats->rx_fragment_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hw_stats->rx_undersize
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hw_stats->rx_under_min
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hw_stats->rx_64bytes = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hw_stats->rx_65to127
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hw_stats->rx_128to255
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hw_stats->rx_256to511
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hw_stats->rx_512to1023
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hw_stats->rx_1024to1518
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hw_stats->rx_1519tomax
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hw_stats->rx_1519tomax_good
+ = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hw_stats->rx_oversize = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hw_stats->rx_jabber_err = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hw_stats->rx_good_pkts = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hw_stats->rx_good_bytes = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hw_stats->rx_total_pkts = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hw_stats->rx_total_bytes
+ = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hw_stats->rx_uc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hw_stats->rx_mc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hw_stats->rx_bc_pkts = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hw_stats->rx_pfc_tc0 = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hw_stats->rx_pfc_tc1 = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hw_stats->rx_pfc_tc2 = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hw_stats->rx_pfc_tc3 = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hw_stats->rx_pfc_tc4 = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hw_stats->rx_pfc_tc5 = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hw_stats->rx_pfc_tc6 = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hw_stats->rx_pfc_tc7 = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+
+ hw_stats->rx_unknown_ctrl
+ = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hw_stats->tx_good_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hw_stats->tx_bad_to_sw
+ = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hw_stats->rx_1731_pkts = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hw_stats->rx_symbol_err
+ = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hw_stats->rx_fcs_err = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+}
+
+/**
+ *hns_xgmac_free - free xgmac driver
+ *@mac_drv: mac driver
+ */
+static void hns_xgmac_free(void *mac_drv)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct dsaf_device *dsaf_dev
+ = (struct dsaf_device *)dev_get_drvdata(drv->dev);
+
+ u32 mac_id = drv->mac_id;
+
+ dsaf_dev->misc_op->xge_srst(dsaf_dev, mac_id, 0);
+}
+
+/**
+ *hns_xgmac_get_info - get xgmac information
+ *@mac_drv: mac driver
+ *@mac_info:mac information
+ */
+static void hns_xgmac_get_info(void *mac_drv, struct mac_info *mac_info)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_time, pause_ctrl, port_mode, ctrl_val;
+
+ ctrl_val = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ mac_info->pad_and_crc_en = dsaf_get_bit(ctrl_val, XGMAC_CTL_TX_PAD_B);
+ mac_info->auto_neg = 0;
+
+ pause_time = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ mac_info->tx_pause_time = pause_time;
+
+ port_mode = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ mac_info->port_en = dsaf_get_field(port_mode, XGMAC_PORT_MODE_TX_M,
+ XGMAC_PORT_MODE_TX_S) &&
+ dsaf_get_field(port_mode, XGMAC_PORT_MODE_RX_M,
+ XGMAC_PORT_MODE_RX_S);
+ mac_info->duplex = 1;
+ mac_info->speed = MAC_SPEED_10000;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ mac_info->rx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ mac_info->tx_pause_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_pausefrm_cfg - get xgmac pause param
+ *@mac_drv: mac driver
+ *@rx_en:xgmac rx pause enable
+ *@tx_en:xgmac tx pause enable
+ */
+static void hns_xgmac_get_pausefrm_cfg(void *mac_drv, u32 *rx_en, u32 *tx_en)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 pause_ctrl;
+
+ pause_ctrl = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ *rx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_RX_B);
+ *tx_en = dsaf_get_bit(pause_ctrl, XGMAC_PAUSE_CTL_TX_B);
+}
+
+/**
+ *hns_xgmac_get_link_status - get xgmac link status
+ *@mac_drv: mac driver
+ *@link_stat: xgmac link stat
+ */
+static void hns_xgmac_get_link_status(void *mac_drv, u32 *link_stat)
+{
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+
+ *link_stat = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+}
+
+/**
+ *hns_xgmac_get_regs - dump xgmac regs
+ *@mac_drv: mac driver
+ *@data:data for value of regs
+ */
+static void hns_xgmac_get_regs(void *mac_drv, void *data)
+{
+ u32 i = 0;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ u32 *regs = data;
+ u64 qtmp;
+
+ /* base config registers */
+ regs[0] = dsaf_read_dev(drv, XGMAC_INT_STATUS_REG);
+ regs[1] = dsaf_read_dev(drv, XGMAC_INT_ENABLE_REG);
+ regs[2] = dsaf_read_dev(drv, XGMAC_INT_SET_REG);
+ regs[3] = dsaf_read_dev(drv, XGMAC_IERR_U_INFO_REG);
+ regs[4] = dsaf_read_dev(drv, XGMAC_OVF_INFO_REG);
+ regs[5] = dsaf_read_dev(drv, XGMAC_OVF_CNT_REG);
+ regs[6] = dsaf_read_dev(drv, XGMAC_PORT_MODE_REG);
+ regs[7] = dsaf_read_dev(drv, XGMAC_CLK_ENABLE_REG);
+ regs[8] = dsaf_read_dev(drv, XGMAC_RESET_REG);
+ regs[9] = dsaf_read_dev(drv, XGMAC_LINK_CONTROL_REG);
+ regs[10] = dsaf_read_dev(drv, XGMAC_LINK_STATUS_REG);
+
+ regs[11] = dsaf_read_dev(drv, XGMAC_SPARE_REG);
+ regs[12] = dsaf_read_dev(drv, XGMAC_SPARE_CNT_REG);
+ regs[13] = dsaf_read_dev(drv, XGMAC_MAC_ENABLE_REG);
+ regs[14] = dsaf_read_dev(drv, XGMAC_MAC_CONTROL_REG);
+ regs[15] = dsaf_read_dev(drv, XGMAC_MAC_IPG_REG);
+ regs[16] = dsaf_read_dev(drv, XGMAC_MAC_MSG_CRC_EN_REG);
+ regs[17] = dsaf_read_dev(drv, XGMAC_MAC_MSG_IMG_REG);
+ regs[18] = dsaf_read_dev(drv, XGMAC_MAC_MSG_FC_CFG_REG);
+ regs[19] = dsaf_read_dev(drv, XGMAC_MAC_MSG_TC_CFG_REG);
+ regs[20] = dsaf_read_dev(drv, XGMAC_MAC_PAD_SIZE_REG);
+ regs[21] = dsaf_read_dev(drv, XGMAC_MAC_MIN_PKT_SIZE_REG);
+ regs[22] = dsaf_read_dev(drv, XGMAC_MAC_MAX_PKT_SIZE_REG);
+ regs[23] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_CTRL_REG);
+ regs[24] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_TIME_REG);
+ regs[25] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_GAP_REG);
+ regs[26] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_H_REG);
+ regs[27] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_LOCAL_MAC_L_REG);
+ regs[28] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_H_REG);
+ regs[29] = dsaf_read_dev(drv, XGMAC_MAC_PAUSE_PEER_MAC_L_REG);
+ regs[30] = dsaf_read_dev(drv, XGMAC_MAC_PFC_PRI_EN_REG);
+ regs[31] = dsaf_read_dev(drv, XGMAC_MAC_1588_CTRL_REG);
+ regs[32] = dsaf_read_dev(drv, XGMAC_MAC_1588_TX_PORT_DLY_REG);
+ regs[33] = dsaf_read_dev(drv, XGMAC_MAC_1588_RX_PORT_DLY_REG);
+ regs[34] = dsaf_read_dev(drv, XGMAC_MAC_1588_ASYM_DLY_REG);
+ regs[35] = dsaf_read_dev(drv, XGMAC_MAC_1588_ADJUST_CFG_REG);
+
+ regs[36] = dsaf_read_dev(drv, XGMAC_MAC_Y1731_ETH_TYPE_REG);
+ regs[37] = dsaf_read_dev(drv, XGMAC_MAC_MIB_CONTROL_REG);
+ regs[38] = dsaf_read_dev(drv, XGMAC_MAC_WAN_RATE_ADJUST_REG);
+ regs[39] = dsaf_read_dev(drv, XGMAC_MAC_TX_ERR_MARK_REG);
+ regs[40] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_CONTROL_REG);
+ regs[41] = dsaf_read_dev(drv, XGMAC_MAC_RX_LF_RF_STATUS_REG);
+ regs[42] = dsaf_read_dev(drv, XGMAC_MAC_TX_RUNT_PKT_CNT_REG);
+ regs[43] = dsaf_read_dev(drv, XGMAC_MAC_RX_RUNT_PKT_CNT_REG);
+ regs[44] = dsaf_read_dev(drv, XGMAC_MAC_RX_PREAM_ERR_PKT_CNT_REG);
+ regs[45] = dsaf_read_dev(drv, XGMAC_MAC_TX_LF_RF_TERM_PKT_CNT_REG);
+ regs[46] = dsaf_read_dev(drv, XGMAC_MAC_TX_SN_MISMATCH_PKT_CNT_REG);
+ regs[47] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_MSG_CNT_REG);
+ regs[48] = dsaf_read_dev(drv, XGMAC_MAC_RX_ERR_EFD_CNT_REG);
+ regs[49] = dsaf_read_dev(drv, XGMAC_MAC_ERR_INFO_REG);
+ regs[50] = dsaf_read_dev(drv, XGMAC_MAC_DBG_INFO_REG);
+
+ regs[51] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SYNC_THD_REG);
+ regs[52] = dsaf_read_dev(drv, XGMAC_PCS_STATUS1_REG);
+ regs[53] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS1_REG);
+ regs[54] = dsaf_read_dev(drv, XGMAC_PCS_BASER_STATUS2_REG);
+ regs[55] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_0_REG);
+ regs[56] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDA_1_REG);
+ regs[57] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_0_REG);
+ regs[58] = dsaf_read_dev(drv, XGMAC_PCS_BASER_SEEDB_1_REG);
+ regs[59] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_CONTROL_REG);
+ regs[60] = dsaf_read_dev(drv, XGMAC_PCS_BASER_TEST_ERR_CNT_REG);
+ regs[61] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO_REG);
+ regs[62] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO1_REG);
+ regs[63] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO2_REG);
+ regs[64] = dsaf_read_dev(drv, XGMAC_PCS_DBG_INFO3_REG);
+
+ regs[65] = dsaf_read_dev(drv, XGMAC_PMA_ENABLE_REG);
+ regs[66] = dsaf_read_dev(drv, XGMAC_PMA_CONTROL_REG);
+ regs[67] = dsaf_read_dev(drv, XGMAC_PMA_SIGNAL_STATUS_REG);
+ regs[68] = dsaf_read_dev(drv, XGMAC_PMA_DBG_INFO_REG);
+ regs[69] = dsaf_read_dev(drv, XGMAC_PMA_FEC_ABILITY_REG);
+ regs[70] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CONTROL_REG);
+ regs[71] = dsaf_read_dev(drv, XGMAC_PMA_FEC_CORR_BLOCK_CNT__REG);
+ regs[72] = dsaf_read_dev(drv, XGMAC_PMA_FEC_UNCORR_BLOCK_CNT__REG);
+
+ /* status registers */
+#define hns_xgmac_cpy_q(p, q) \
+ do {\
+ *(p) = (u32)(q);\
+ *((p) + 1) = (u32)((q) >> 32);\
+ } while (0)
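+
+/*
+ * Editor's note: hns_xgmac_cpy_q() stores one 64-bit MIB counter into two
+ * consecutive u32 slots of the ethtool regs buffer (low word first),
+ * which is why the destination index below advances by two per counter.
+ */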
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[73], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERSIZE);
+ hns_xgmac_cpy_q(&regs[75], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[77], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[79], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[81], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[83], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[85], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[87], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[89], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[91], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[93], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[95], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[97], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[99], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[101], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[103], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[105], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[107], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[109], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[111], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[113], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[115], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[117], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[119], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[121], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[123], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[125], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[127], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[129], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[131], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_1588PKTS);
+ hns_xgmac_cpy_q(&regs[133], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[135], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FROMAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[137], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_ERRALLPKTS);
+ hns_xgmac_cpy_q(&regs[139], qtmp);
+
+ /* RX */
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_FRAGMENT);
+ hns_xgmac_cpy_q(&regs[141], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTSUNDERSIZE);
+ hns_xgmac_cpy_q(&regs[143], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_UNDERMIN);
+ hns_xgmac_cpy_q(&regs[145], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_64OCTETS);
+ hns_xgmac_cpy_q(&regs[147], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_65TO127OCTETS);
+ hns_xgmac_cpy_q(&regs[149], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_128TO255OCTETS);
+ hns_xgmac_cpy_q(&regs[151], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_256TO511OCTETS);
+ hns_xgmac_cpy_q(&regs[153], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_512TO1023OCTETS);
+ hns_xgmac_cpy_q(&regs[155], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1024TO1518OCTETS);
+ hns_xgmac_cpy_q(&regs[157], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETS);
+ hns_xgmac_cpy_q(&regs[159], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_1519TOMAXOCTETSOK);
+ hns_xgmac_cpy_q(&regs[161], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_OVERSIZE);
+ hns_xgmac_cpy_q(&regs[163], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PKTS_JABBER);
+ hns_xgmac_cpy_q(&regs[165], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODPKTS);
+ hns_xgmac_cpy_q(&regs[167], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_GOODOCTETS);
+ hns_xgmac_cpy_q(&regs[169], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTAL_PKTS);
+ hns_xgmac_cpy_q(&regs[171], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_TOTALOCTETS);
+ hns_xgmac_cpy_q(&regs[173], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_UNICASTPKTS);
+ hns_xgmac_cpy_q(&regs[175], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MULTICASTPKTS);
+ hns_xgmac_cpy_q(&regs[177], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_BROADCASTPKTS);
+ hns_xgmac_cpy_q(&regs[179], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI0PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[181], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI1PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[183], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI2PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[185], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI3PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[187], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI4PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[189], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI5PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[191], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI6PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[193], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_PRI7PAUSEPKTS);
+ hns_xgmac_cpy_q(&regs[195], qtmp);
+
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_MACCTRLPKTS);
+ hns_xgmac_cpy_q(&regs[197], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPGOODPKTS);
+ hns_xgmac_cpy_q(&regs[199], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_TX_SENDAPPBADPKTS);
+ hns_xgmac_cpy_q(&regs[201], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_1731PKTS);
+ hns_xgmac_cpy_q(&regs[203], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_SYMBOLERRPKTS);
+ hns_xgmac_cpy_q(&regs[205], qtmp);
+ qtmp = hns_mac_reg_read64(drv, XGMAC_RX_FCSERRPKTS);
+ hns_xgmac_cpy_q(&regs[207], qtmp);
+
+ /* mark end of mac regs */
+ for (i = 208; i < 214; i++)
+ regs[i] = 0xaaaaaaaa;
+}
+
+/**
+ *hns_xgmac_get_stats - get xgmac statistic
+ *@mac_drv: mac driver
+ *@data:data for value of stats regs
+ */
+static void hns_xgmac_get_stats(void *mac_drv, u64 *data)
+{
+ u32 i;
+ u64 *buf = data;
+ struct mac_driver *drv = (struct mac_driver *)mac_drv;
+ struct mac_hw_stats *hw_stats = NULL;
+
+ hw_stats = &drv->mac_cb->hw_stats;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+ buf[i] = DSAF_STATS_READ(hw_stats,
+ g_xgmac_stats_string[i].offset);
+ }
+}
+
+/**
+ *hns_xgmac_get_strings - get xgmac strings name
+ *@stringset: type of values in data
+ *@data:data for value of string name
+ */
+static void hns_xgmac_get_strings(u32 stringset, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(g_xgmac_stats_string); i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s", g_xgmac_stats_string[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+}
+
+/**
+ *hns_xgmac_get_sset_count - get xgmac string set count
+ *@stringset: type of values in data
+ *return xgmac string set count
+ */
+static int hns_xgmac_get_sset_count(int stringset)
+{
+ if (stringset == ETH_SS_STATS || stringset == ETH_SS_PRIV_FLAGS)
+ return ARRAY_SIZE(g_xgmac_stats_string);
+
+ return 0;
+}
+
+/**
+ *hns_xgmac_get_regs_count - get xgmac regs count
+ *return xgmac regs count
+ */
+static int hns_xgmac_get_regs_count(void)
+{
+ return HNS_XGMAC_DUMP_NUM;
+}
+
+void *hns_xgmac_config(struct hns_mac_cb *mac_cb, struct mac_params *mac_param)
+{
+ struct mac_driver *mac_drv;
+
+ mac_drv = devm_kzalloc(mac_cb->dev, sizeof(*mac_drv), GFP_KERNEL);
+ if (!mac_drv)
+ return NULL;
+
+ mac_drv->mac_init = hns_xgmac_init;
+ mac_drv->mac_enable = hns_xgmac_enable;
+ mac_drv->mac_disable = hns_xgmac_disable;
+
+ mac_drv->mac_id = mac_param->mac_id;
+ mac_drv->mac_mode = mac_param->mac_mode;
+ mac_drv->io_base = mac_param->vaddr;
+ mac_drv->dev = mac_param->dev;
+ mac_drv->mac_cb = mac_cb;
+
+ mac_drv->set_mac_addr = hns_xgmac_set_pausefrm_mac_addr;
+ mac_drv->set_an_mode = NULL;
+ mac_drv->config_loopback = NULL;
+ mac_drv->config_pad_and_crc = hns_xgmac_config_pad_and_crc;
+ mac_drv->config_half_duplex = NULL;
+ mac_drv->set_rx_ignore_pause_frames =
+ hns_xgmac_set_rx_ignore_pause_frames;
+ mac_drv->mac_free = hns_xgmac_free;
+ mac_drv->adjust_link = NULL;
+ mac_drv->set_tx_auto_pause_frames = hns_xgmac_set_tx_auto_pause_frames;
+ mac_drv->config_max_frame_length = hns_xgmac_config_max_frame_length;
+ mac_drv->mac_pausefrm_cfg = hns_xgmac_pausefrm_cfg;
+ mac_drv->autoneg_stat = NULL;
+ mac_drv->get_info = hns_xgmac_get_info;
+ mac_drv->get_pause_enable = hns_xgmac_get_pausefrm_cfg;
+ mac_drv->get_link_status = hns_xgmac_get_link_status;
+ mac_drv->get_regs = hns_xgmac_get_regs;
+ mac_drv->get_ethtool_stats = hns_xgmac_get_stats;
+ mac_drv->get_sset_count = hns_xgmac_get_sset_count;
+ mac_drv->get_regs_count = hns_xgmac_get_regs_count;
+ mac_drv->get_strings = hns_xgmac_get_strings;
+ mac_drv->update_stats = hns_xgmac_update_stats;
+
+ return (void *)mac_drv;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
new file mode 100644
index 000000000..e1b3db980
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_xgmac.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef _HNS_XGMAC_H
+#define _HNS_XGMAC_H
+
+#define HNS_XGMAC_DUMP_NUM 214
+#define HNS_XGMAC_NO_LF_RF_INSERT 0x0
+#define HNS_XGMAC_LF_INSERT 0x2
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
new file mode 100644
index 000000000..8bce5f151
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -0,0 +1,2446 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/clk.h>
+#include <linux/cpumask.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+
+#include "hnae.h"
+#include "hns_enet.h"
+#include "hns_dsaf_mac.h"
+
+#define NIC_MAX_Q_PER_VF 16
+#define HNS_NIC_TX_TIMEOUT (5 * HZ)
+
+#define SERVICE_TIMER_HZ (1 * HZ)
+
+#define RCB_IRQ_NOT_INITED 0
+#define RCB_IRQ_INITED 1
+#define HNS_BUFFER_SIZE_2048 2048
+
+#define BD_MAX_SEND_SIZE 8191
+#define SKB_TMP_LEN(SKB) \
+ (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
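+
+/*
+ * Editor's note: SKB_TMP_LEN() is the number of header bytes from the MAC
+ * header up to and including the TCP header; for TSO the payload length
+ * written to the descriptor is skb->len - SKB_TMP_LEN(skb).
+ */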
+
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct iphdr *iphdr;
+ struct ipv6hdr *ipv6hdr;
+ struct sk_buff *skb;
+ __be16 protocol;
+ u8 bn_pid = 0;
+ u8 rrcfv = 0;
+ u8 ip_offset = 0;
+ u8 tvsvsn = 0;
+ u16 mss = 0;
+ u8 l4_len = 0;
+ u16 paylen = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
+
+ /* config bd buffer end */
+ hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
+ hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);
+
+ /* fill port_id in the tx bd for sending management pkts */
+ hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
+ HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_reset_mac_len(skb);
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ iphdr = ip_hdr(skb);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
+
+ /* check for tcp/udp header */
+ if (iphdr->protocol == IPPROTO_TCP &&
+ skb_is_gso(skb)) {
+ hnae_set_bit(tvsvsn,
+ HNSV2_TXD_TSE_B, 1);
+ l4_len = tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - SKB_TMP_LEN(skb);
+ }
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
+ ipv6hdr = ipv6_hdr(skb);
+ hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);
+
+ /* check for tcp/udp header */
+ if (ipv6hdr->nexthdr == IPPROTO_TCP &&
+ skb_is_gso(skb) && skb_is_gso_v6(skb)) {
+ hnae_set_bit(tvsvsn,
+ HNSV2_TXD_TSE_B, 1);
+ l4_len = tcp_hdrlen(skb);
+ mss = skb_shinfo(skb)->gso_size;
+ paylen = skb->len - SKB_TMP_LEN(skb);
+ }
+ }
+ desc->tx.ip_offset = ip_offset;
+ desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.l4_len = l4_len;
+ desc->tx.paylen = cpu_to_le16(paylen);
+ }
+ }
+
+ hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);
+
+ desc->tx.bn_pid = bn_pid;
+ desc->tx.ra_ri_cs_fe_vld = rrcfv;
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
+static const struct acpi_device_id hns_enet_acpi_match[] = {
+ { "HISI00C1", 0 },
+ { "HISI00C2", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);
+
+static void fill_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ struct hnae_desc *desc = &ring->desc[ring->next_to_use];
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct sk_buff *skb;
+ __be16 protocol;
+ u32 ip_offset;
+ u32 asid_bufnum_pid = 0;
+ u32 flag_ipoffset = 0;
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16((u16)size);
+
+ /* config bd buffer end */
+ flag_ipoffset |= 1 << HNS_TXD_VLD_B;
+
+ asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;
+
+ if (type == DESC_TYPE_SKB) {
+ skb = (struct sk_buff *)priv;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ protocol = skb->protocol;
+ ip_offset = ETH_HLEN;
+
+ /* if it is a SW VLAN, check the next protocol */
+ if (protocol == htons(ETH_P_8021Q)) {
+ ip_offset += VLAN_HLEN;
+ protocol = vlan_get_protocol(skb);
+ skb->protocol = protocol;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
+ /* check for tcp/udp header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ /* ipv6 has no l3 checksum, check for L4 header */
+ flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
+ }
+
+ flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
+ }
+ }
+
+ flag_ipoffset |= frag_end << HNS_TXD_FE_B;
+
+ desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
+ desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);
+
+ ring_ptr_move_fw(ring, next_to_use);
+}
+
+static void unfill_desc(struct hnae_ring *ring)
+{
+ ring_ptr_move_bw(ring, next_to_use);
+}
+
+static int hns_nic_maybe_stop_tx(
+ struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
+{
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
+ int buf_num;
+
+ /* no. of segments (plus a header) */
+ buf_num = skb_shinfo(skb)->nr_frags + 1;
+
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ if (ring_space(ring) < 1)
+ return -EBUSY;
+
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+ buf_num = 1;
+ } else if (buf_num > ring_space(ring)) {
+ return -EBUSY;
+ }
+
+ *bnum = buf_num;
+ return 0;
+}
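+
+/*
+ * Editor's note: when an skb needs more descriptors than the ring allows
+ * per packet, the helper above linearizes it with skb_copy() so it fits
+ * into a single buffer descriptor; otherwise it only checks ring space.
+ */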
+
+static int hns_nic_maybe_stop_tso(
+ struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
+{
+ int i;
+ int size;
+ int buf_num;
+ int frag_num;
+ struct sk_buff *skb = *out_skb;
+ struct sk_buff *new_skb = NULL;
+ skb_frag_t *frag;
+
+ size = skb_headlen(skb);
+ buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+
+ frag_num = skb_shinfo(skb)->nr_frags;
+ for (i = 0; i < frag_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ size = skb_frag_size(frag);
+ buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ }
+
+ if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
+ buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ if (ring_space(ring) < buf_num)
+ return -EBUSY;
+ /* manual split the send packet */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ if (!new_skb)
+ return -ENOMEM;
+ dev_kfree_skb_any(skb);
+ *out_skb = new_skb;
+
+ } else if (ring_space(ring) < buf_num) {
+ return -EBUSY;
+ }
+
+ *bnum = buf_num;
+ return 0;
+}
+
+static void fill_tso_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ int frag_buf_num;
+ int sizeoflast;
+ int k;
+
+ frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
+ sizeoflast = size % BD_MAX_SEND_SIZE;
+ sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;
+
+ /* when the frag size is bigger than hardware can handle, split this frag */
+ for (k = 0; k < frag_buf_num; k++)
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
+ sizeoflast : BD_MAX_SEND_SIZE,
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
+ DESC_TYPE_SKB : DESC_TYPE_PAGE,
+ mtu);
+}
+
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_ring *ring = ring_data->ring;
+ struct device *dev = ring_to_dev(ring);
+ struct netdev_queue *dev_queue;
+ skb_frag_t *frag;
+ int buf_num;
+ int seg_num;
+ dma_addr_t dma;
+ int size, next_to_use;
+ int i;
+
+ switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
+ case -EBUSY:
+ ring->stats.tx_busy++;
+ goto out_net_tx_busy;
+ case -ENOMEM:
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "no memory to xmit!\n");
+ goto out_err_tx_ok;
+ default:
+ break;
+ }
+
+ /* no. of segments (plus a header) */
+ seg_num = skb_shinfo(skb)->nr_frags + 1;
+ next_to_use = ring->next_to_use;
+
+ /* fill the first part */
+ size = skb_headlen(skb);
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX head DMA map failed\n");
+ ring->stats.sw_err_cnt++;
+ goto out_err_tx_ok;
+ }
+ priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
+ buf_num, DESC_TYPE_SKB, ndev->mtu);
+
+ /* fill the fragments */
+ for (i = 1; i < seg_num; i++) {
+ frag = &skb_shinfo(skb)->frags[i - 1];
+ size = skb_frag_size(frag);
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma)) {
+ netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
+ ring->stats.sw_err_cnt++;
+ goto out_map_frag_fail;
+ }
+ priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
+ seg_num - 1 == i ? 1 : 0, buf_num,
+ DESC_TYPE_PAGE, ndev->mtu);
+ }
+
+ /* complete translating all packets */
+ dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
+ netdev_tx_sent_queue(dev_queue, skb->len);
+
+ netif_trans_update(ndev);
+ ndev->stats.tx_bytes += skb->len;
+ ndev->stats.tx_packets++;
+
+ wmb(); /* commit all data before submit */
+ assert(skb->queue_mapping < priv->ae_handle->q_num);
+ hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
+
+ return NETDEV_TX_OK;
+
+out_map_frag_fail:
+
+ while (ring->next_to_use != next_to_use) {
+ unfill_desc(ring);
+ if (ring->next_to_use != next_to_use)
+ dma_unmap_page(dev,
+ ring->desc_cb[ring->next_to_use].dma,
+ ring->desc_cb[ring->next_to_use].length,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(dev,
+ ring->desc_cb[next_to_use].dma,
+ ring->desc_cb[next_to_use].length,
+ DMA_TO_DEVICE);
+ }
+
+out_err_tx_ok:
+
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+out_net_tx_busy:
+
+ netif_stop_subqueue(ndev, skb->queue_mapping);
+
+ /* Herbert's original patch had:
+ * smp_mb__after_netif_stop_queue();
+ * but since that doesn't exist yet, just open code it.
+ */
+ smp_mb();
+ return NETDEV_TX_BUSY;
+}
+
+static void hns_nic_reuse_page(struct sk_buff *skb, int i,
+ struct hnae_ring *ring, int pull_len,
+ struct hnae_desc_cb *desc_cb)
+{
+ struct hnae_desc *desc;
+ u32 truesize;
+ int size;
+ int last_offset;
+ bool twobufs;
+
+ twobufs = ((PAGE_SIZE < 8192) &&
+ hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);
+
+ desc = &ring->desc[ring->next_to_clean];
+ size = le16_to_cpu(desc->rx.size);
+
+ if (twobufs) {
+ truesize = hnae_buf_size(ring);
+ } else {
+ truesize = ALIGN(size, L1_CACHE_BYTES);
+ last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
+ }
+
+ skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+ size - pull_len, truesize);
+
+	/* avoid reusing pages from a remote NUMA node; the flag defaults to no reuse */
+ if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
+ return;
+
+ if (twobufs) {
+ /* if we are only owner of page we can reuse it */
+ if (likely(page_count(desc_cb->priv) == 1)) {
+ /* flip page offset to other buffer */
+ desc_cb->page_offset ^= truesize;
+
+ desc_cb->reuse_flag = 1;
+			/* bump ref count on the page before it is given out */
+ get_page(desc_cb->priv);
+ }
+ return;
+ }
+
+ /* move offset up to the next cache line */
+ desc_cb->page_offset += truesize;
+
+ if (desc_cb->page_offset <= last_offset) {
+ desc_cb->reuse_flag = 1;
+		/* bump ref count on the page before it is given out */
+ get_page(desc_cb->priv);
+ }
+}
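+
+/* For illustration (page and buffer sizes assumed): with 4K pages and
+ * 2048-byte buffers the "twobufs" case applies, so a page that only the
+ * driver still references has its page_offset flipped between 0 and 2048 and
+ * is handed back to the ring; with larger pages, page_offset instead advances
+ * by the cache-line-aligned received size until last_offset is passed, after
+ * which the page is no longer reused.
+ */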
+
+static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
+{
+ *out_bnum = hnae_get_field(bnum_flag,
+ HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
+}
+
+static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
+{
+ *out_bnum = hnae_get_field(bnum_flag,
+ HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
+}
+
+static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb, u32 flag)
+{
+ struct net_device *netdev = ring_data->napi.dev;
+ u32 l3id;
+ u32 l4id;
+
+ /* check if RX checksum offload is enabled */
+ if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
+ return;
+
+ /* In hardware, we only support checksum for the following protocols:
+ * 1) IPv4,
+	 * 2) TCP (over IPv4 or IPv6),
+	 * 3) UDP (over IPv4 or IPv6),
+	 * 4) SCTP (over IPv4 or IPv6)
+	 * but we support many L3 (IPv4, IPv6, MPLS, PPPoE, etc.) and
+	 * L4 (TCP, UDP, GRE, SCTP, IGMP, ICMP, etc.) protocols.
+ *
+ * Hardware limitation:
+ * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
+ * Error" bit (which usually can be used to indicate whether checksum
+ * was calculated by the hardware and if there was any error encountered
+ * during checksum calculation).
+ *
+ * Software workaround:
+ * We do get info within the RX descriptor about the kind of L3/L4
+ * protocol coming in the packet and the error status. These errors
+ * might not just be checksum errors but could be related to version,
+ * length of IPv4, UDP, TCP etc.
+	 * Because there is no way of knowing whether an L3/L4 error is due to
+	 * a bad checksum or to some other L3/L4 problem, we cannot convey a
+	 * checksum status for such packets to the upper stack and do not
+	 * maintain the RX L3/L4 checksum counters either.
+ */
+
+ l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
+ l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);
+
+ /* check L3 protocol for which checksum is supported */
+ if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
+ return;
+
+	/* check for any (not just checksum) flagged L3 protocol errors */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
+ return;
+
+ /* we do not support checksum of fragmented packets */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
+ return;
+
+ /* check L4 protocol for which checksum is supported */
+ if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
+ (l4id != HNS_RX_FLAG_L4ID_UDP) &&
+ (l4id != HNS_RX_FLAG_L4ID_SCTP))
+ return;
+
+	/* check for any (not just checksum) flagged L4 protocol errors */
+ if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
+ return;
+
+ /* now, this has to be a packet with valid RX checksum */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+}
+
+static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
+ struct sk_buff **out_skb, int *out_bnum)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ struct hnae_desc *desc;
+ struct hnae_desc_cb *desc_cb;
+ unsigned char *va;
+ int bnum, length, i;
+ int pull_len;
+ u32 bnum_flag;
+
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ prefetch(desc);
+
+ va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
+
+ /* prefetch first cache line of first page */
+ net_prefetch(va);
+
+ skb = *out_skb = napi_alloc_skb(&ring_data->napi,
+ HNS_RX_HEAD_SIZE);
+ if (unlikely(!skb)) {
+ ring->stats.sw_err_cnt++;
+ return -ENOMEM;
+ }
+
+ prefetchw(skb->data);
+ length = le16_to_cpu(desc->rx.pkt_len);
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+ priv->ops.get_rxd_bnum(bnum_flag, &bnum);
+ *out_bnum = bnum;
+
+ if (length <= HNS_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* we can reuse buffer as-is, just make sure it is local */
+ if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
+ desc_cb->reuse_flag = 1;
+ else /* this page cannot be reused so discard it */
+ put_page(desc_cb->priv);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+
+		if (unlikely(bnum != 1)) { /* check err */
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ } else {
+ ring->stats.seg_pkt_cnt++;
+
+ pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
+ memcpy(__skb_put(skb, pull_len), va,
+ ALIGN(pull_len, sizeof(long)));
+
+ hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+
+		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
+ *out_bnum = 1;
+ goto out_bnum_err;
+ }
+ for (i = 1; i < bnum; i++) {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+ }
+
+	/* exception handling: free the skb and skip these descriptors */
+ if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
+out_bnum_err:
+		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc has moved, cannot be 0 */
+ netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
+ bnum, ring->max_desc_num_per_pkt,
+ length, (int)MAX_SKB_FRAGS,
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.err_bd_num++;
+ dev_kfree_skb_any(skb);
+ return -EDOM;
+ }
+
+ bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
+
+ if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
+ netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
+ ((u64 *)desc)[0], ((u64 *)desc)[1]);
+ ring->stats.non_vld_descs++;
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (unlikely((!desc->rx.pkt_len) ||
+ hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
+ ring->stats.err_pkt_len++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
+ ring->stats.l2_err++;
+ dev_kfree_skb_any(skb);
+ return -EFAULT;
+ }
+
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += skb->len;
+
+ /* indicate to upper stack if our hardware has already calculated
+ * the RX checksum
+ */
+ hns_nic_rx_checksum(ring_data, skb, bnum_flag);
+
+ return 0;
+}
+
+static void
+hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
+{
+ int i, ret;
+ struct hnae_desc_cb res_cbs;
+ struct hnae_desc_cb *desc_cb;
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+
+ for (i = 0; i < cleand_count; i++) {
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ if (desc_cb->reuse_flag) {
+ ring->stats.reuse_pg_cnt++;
+ hnae_reuse_buffer(ring, ring->next_to_use);
+ } else {
+ ret = hnae_reserve_buffer_map(ring, &res_cbs);
+ if (ret) {
+ ring->stats.sw_err_cnt++;
+ netdev_err(ndev, "hnae reserve buffer map failed.\n");
+ break;
+ }
+ hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+	wmb(); /* make sure all data has been written before submitting */
+ writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
+}
+
+/* hand a received skb up to the protocol stack */
+static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev = ring_data->napi.dev;
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ napi_gro_receive(&ring_data->napi, skb);
+}
+
+static int hns_desc_unused(struct hnae_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
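+
+/* For illustration (values assumed): with desc_num = 1024, next_to_clean = 10
+ * and next_to_use = 20 there are 10 descriptors in flight, so
+ * hns_desc_unused() returns 1024 + 10 - 20 = 1014.
+ */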
+
+#define HNS_LOWEST_LATENCY_RATE 27 /* 27 MB/s */
+#define HNS_LOW_LATENCY_RATE 80 /* 80 MB/s */
+
+#define HNS_COAL_BDNUM 3
+
+static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
+{
+ bool coal_enable = ring->q->handle->coal_adapt_en;
+
+ if (coal_enable &&
+ ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
+ return HNS_COAL_BDNUM;
+ else
+ return 0;
+}
+
+static void hns_update_rx_rate(struct hnae_ring *ring)
+{
+ bool coal_enable = ring->q->handle->coal_adapt_en;
+ u32 time_passed_ms;
+ u64 total_bytes;
+
+ if (!coal_enable ||
+ time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
+ return;
+
+ /* ring->stats.rx_bytes overflowed */
+ if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
+ ring->coal_last_rx_bytes = ring->stats.rx_bytes;
+ ring->coal_last_jiffies = jiffies;
+ return;
+ }
+
+ total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
+ time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
+ do_div(total_bytes, time_passed_ms);
+ ring->coal_rx_rate = total_bytes >> 10;
+
+ ring->coal_last_rx_bytes = ring->stats.rx_bytes;
+ ring->coal_last_jiffies = jiffies;
+}
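+
+/* For illustration (numbers assumed): if roughly 16 MB arrived since a sample
+ * taken 1000 ms ago, total_bytes / time_passed_ms is about 16777 bytes per
+ * millisecond and coal_rx_rate becomes 16777 >> 10 = 16, i.e. about 16 MB/s,
+ * which is the unit the HNS_*_LATENCY_RATE thresholds above are expressed in.
+ */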
+
+/**
+ * smooth_alg - smoothing algorithm for adjusting the coalesce parameter
+ * @new_param: new value
+ * @old_param: old value
+ **/
+static u32 smooth_alg(u32 new_param, u32 old_param)
+{
+ u32 gap = (new_param > old_param) ? new_param - old_param
+ : old_param - new_param;
+
+ if (gap > 8)
+ gap >>= 3;
+
+ if (new_param > old_param)
+ return old_param + gap;
+ else
+ return old_param - gap;
+}
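+
+/* For illustration: smooth_alg(64, 16) sees a gap of 48; since that is larger
+ * than 8 it is shifted down to 48 >> 3 = 6, so the old value moves only part
+ * of the way towards the new one and 16 + 6 = 22 is returned instead of 64.
+ */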
+
+/**
+ * hns_nic_adpt_coalesce - self-adapting coalesce according to the rx rate
+ * @ring_data: pointer to hns_nic_ring_data
+ **/
+static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct hnae_handle *handle = ring->q->handle;
+ u32 new_coal_param, old_coal_param = ring->coal_param;
+
+ if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
+ new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
+ else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
+ new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
+ else
+ new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;
+
+ if (new_coal_param == old_coal_param &&
+ new_coal_param == handle->coal_param)
+ return;
+
+ new_coal_param = smooth_alg(new_coal_param, old_coal_param);
+ ring->coal_param = new_coal_param;
+
+ /**
+	 * Because all rings on one port share one coalesce param, a ring that
+	 * calculates its own coalesce param cannot write it to hardware
+	 * immediately. It is only written in one of three cases:
+	 * 1. the current ring's coalesce param is larger than the hardware's.
+	 * 2. the ring which adapted last time changes its value again.
+	 * 3. a timeout expires.
+ */
+ if (new_coal_param == handle->coal_param) {
+ handle->coal_last_jiffies = jiffies;
+ handle->coal_ring_idx = ring_data->queue_index;
+ } else if (new_coal_param > handle->coal_param ||
+ handle->coal_ring_idx == ring_data->queue_index ||
+ time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
+ handle->dev->ops->set_coalesce_usecs(handle,
+ new_coal_param);
+ handle->dev->ops->set_coalesce_frames(handle,
+ 1, new_coal_param);
+ handle->coal_param = new_coal_param;
+ handle->coal_ring_idx = ring_data->queue_index;
+ handle->coal_last_jiffies = jiffies;
+ }
+}
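+
+/* For illustration: a ring measuring about 10 MB/s selects
+ * HNAE_LOWEST_LATENCY_COAL_PARAM, about 50 MB/s selects
+ * HNAE_LOW_LATENCY_COAL_PARAM, and anything at or above 80 MB/s falls through
+ * to HNAE_BULK_LATENCY_COAL_PARAM; the chosen value is then blended with the
+ * previous one by smooth_alg() before it may be written to hardware.
+ */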
+
+static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct sk_buff *skb;
+ int num, bnum;
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int recv_pkts, recv_bds, clean_count, err;
+ int unused_count = hns_desc_unused(ring);
+
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+	rmb(); /* make sure num has taken effect before other data is touched */
+
+ recv_pkts = 0, recv_bds = 0, clean_count = 0;
+ num -= unused_count;
+
+ while (recv_pkts < budget && recv_bds < num) {
+ /* reuse or realloc buffers */
+ if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
+ clean_count = 0;
+ unused_count = hns_desc_unused(ring);
+ }
+
+ /* poll one pkt */
+ err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
+ if (unlikely(!skb)) /* this fault cannot be repaired */
+ goto out;
+
+ recv_bds += bnum;
+ clean_count += bnum;
+		if (unlikely(err)) { /* skip the erroneous packet */
+ recv_pkts++;
+ continue;
+ }
+
+		/* hand the packet up to the ip stack */
+ ((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
+ ring_data, skb);
+ recv_pkts++;
+ }
+
+out:
+	/* make sure all data has been written before submitting */
+ if (clean_count + unused_count > 0)
+ hns_nic_alloc_rx_buffers(ring_data,
+ clean_count + unused_count);
+
+ return recv_pkts;
+}
+
+static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num = 0;
+ bool rx_stopped;
+
+ hns_update_rx_rate(ring);
+
+	/* workaround for a hardware bug */
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num <= hns_coal_rx_bdnum(ring)) {
+ if (ring->q->handle->coal_adapt_en)
+ hns_nic_adpt_coalesce(ring_data);
+
+ rx_stopped = true;
+ } else {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ rx_stopped = false;
+ }
+
+ return rx_stopped;
+}
+
+static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int num;
+
+ hns_update_rx_rate(ring);
+ num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
+
+ if (num <= hns_coal_rx_bdnum(ring)) {
+ if (ring->q->handle->coal_adapt_en)
+ hns_nic_adpt_coalesce(ring_data);
+
+ return true;
+ }
+
+ return false;
+}
+
+static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
+ int *bytes, int *pkts)
+{
+ struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+	/* desc_cb will be cleaned after hnae_free_buffer_detach() */
+ hnae_free_buffer_detach(ring, ring->next_to_clean);
+
+ ring_ptr_move_fw(ring, next_to_clean);
+}
+
+static int is_valid_clean_head(struct hnae_ring *ring, int h)
+{
+ int u = ring->next_to_use;
+ int c = ring->next_to_clean;
+
+ if (unlikely(h > ring->desc_num))
+ return 0;
+
+ assert(u > 0 && u < ring->desc_num);
+ assert(c > 0 && c < ring->desc_num);
+	assert(u != c && h != c); /* must be checked before calling this function */
+
+ return u > c ? (h > c && h <= u) : (h > c || h <= u);
+}
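+
+/* For illustration (values assumed): with desc_num = 1024, next_to_clean = 1000
+ * and next_to_use = 10 the ring has wrapped, so a reported head of 1010 or of
+ * 5 is accepted (h > c || h <= u), while a head of 500 would be rejected
+ * because it lies in the already-cleaned region.
+ */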
+
+/* reclaim all desc in one budget
+ * return error or number of desc left
+ */
+static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
+ int budget, void *v)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ int head;
+ int bytes, pkts;
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ rmb(); /* make sure head is ready before touch any data */
+
+ if (is_ring_empty(ring) || head == ring->next_to_clean)
+ return 0; /* no data to poll */
+
+ if (!is_valid_clean_head(ring, head)) {
+ netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
+ ring->next_to_use, ring->next_to_clean);
+ ring->stats.io_err_cnt++;
+ return -EIO;
+ }
+
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean) {
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+ /* issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ring->next_to_clean]);
+ }
+ /* update tx ring statistics. */
+ ring->stats.tx_pkts += pkts;
+ ring->stats.tx_bytes += bytes;
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+ if (unlikely(priv->link && !netif_carrier_ok(ndev)))
+ netif_carrier_on(ndev);
+
+ if (unlikely(pkts && netif_carrier_ok(ndev) &&
+ (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(NIC_STATE_DOWN, &priv->state)) {
+ netif_tx_wake_queue(dev_queue);
+ ring->stats.restart_queue++;
+ }
+ }
+ return 0;
+}
+
+static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head != ring->next_to_clean) {
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+
+ return false;
+ } else {
+ return true;
+ }
+}
+
+static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+
+ if (head == ring->next_to_clean)
+ return true;
+ else
+ return false;
+}
+
+static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
+{
+ struct hnae_ring *ring = ring_data->ring;
+ struct net_device *ndev = ring_data->napi.dev;
+ struct netdev_queue *dev_queue;
+ int head;
+ int bytes, pkts;
+
+	head = ring->next_to_use; /* ntu: ring position set by software */
+ bytes = 0;
+ pkts = 0;
+ while (head != ring->next_to_clean)
+ hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
+
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+}
+
+static int hns_nic_common_poll(struct napi_struct *napi, int budget)
+{
+ int clean_complete = 0;
+ struct hns_nic_ring_data *ring_data =
+ container_of(napi, struct hns_nic_ring_data, napi);
+ struct hnae_ring *ring = ring_data->ring;
+
+ clean_complete += ring_data->poll_one(
+ ring_data, budget - clean_complete,
+ ring_data->ex_process);
+
+ if (clean_complete < budget) {
+ if (ring_data->fini_process(ring_data)) {
+ napi_complete(napi);
+ ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
+ } else {
+ return budget;
+ }
+ }
+
+ return clean_complete;
+}
+
+static irqreturn_t hns_irq_handle(int irq, void *dev)
+{
+ struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;
+
+ ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
+ ring_data->ring, 1);
+ napi_schedule(&ring_data->napi);
+
+ return IRQ_HANDLED;
+}
+
+/**
+ *hns_nic_adjust_link - adjust the network mode by the phy state or new params
+ *@ndev: net device
+ */
+static void hns_nic_adjust_link(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int state = 1;
+
+	/* If there is no phy, there is no need to adjust the link */
+ if (ndev->phydev) {
+ /* When phy link down, do nothing */
+ if (ndev->phydev->link == 0)
+ return;
+
+ if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex)) {
+			/* the Hi161X chip cannot change gmac speed and duplex
+			 * while traffic is flowing, so delay 200ms to make
+			 * sure there is no more data in the chip FIFO.
+ */
+ netif_carrier_off(ndev);
+ msleep(200);
+ h->dev->ops->adjust_link(h, ndev->phydev->speed,
+ ndev->phydev->duplex);
+ netif_carrier_on(ndev);
+ }
+ }
+
+ state = state && h->dev->ops->get_status(h);
+
+ if (state != priv->link) {
+ if (state) {
+ netif_carrier_on(ndev);
+ netif_tx_wake_all_queues(ndev);
+ netdev_info(ndev, "link up\n");
+ } else {
+ netif_carrier_off(ndev);
+ netdev_info(ndev, "link down\n");
+ }
+ priv->link = state;
+ }
+}
+
+/**
+ *hns_nic_init_phy - init phy
+ *@ndev: net device
+ *@h: ae handle
+ * Return 0 on success, negative on failure
+ */
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
+{
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
+ struct phy_device *phy_dev = h->phy_dev;
+ int ret;
+
+ if (!h->phy_dev)
+ return 0;
+
+ ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
+ linkmode_and(phy_dev->supported, phy_dev->supported, supported);
+ linkmode_copy(phy_dev->advertising, phy_dev->supported);
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
+ phy_dev->autoneg = false;
+
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ phy_dev->dev_flags = 0;
+
+ ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
+ h->phy_if);
+ } else {
+ ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
+ }
+ if (unlikely(ret))
+ return -ENODEV;
+
+ phy_attached_info(phy_dev);
+
+ return 0;
+}
+
+static int hns_nic_ring_open(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ napi_enable(&priv->ring_data[idx].napi);
+
+ enable_irq(priv->ring_data[idx].ring->irq);
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
+
+ return 0;
+}
+
+static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct sockaddr *mac_addr = p;
+ int ret;
+
+ if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
+ if (ret) {
+ netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);
+
+ return 0;
+}
+
+static void hns_nic_update_stats(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+}
+
+/* set the mac addr if it is configured, otherwise leave it to the AE driver */
+static void hns_init_mac_addr(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
+ eth_hw_addr_random(ndev);
+ dev_warn(priv->dev, "No valid mac, use random mac %pM",
+ ndev->dev_addr);
+ }
+}
+
+static void hns_nic_ring_close(struct net_device *netdev, int idx)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
+ disable_irq(priv->ring_data[idx].ring->irq);
+
+ napi_disable(&priv->ring_data[idx].napi);
+}
+
+static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
+ struct hnae_ring *ring, cpumask_t *mask)
+{
+ int cpu;
+
+	/* IRQ balancing differs between 16-core and 32-core systems.
+	 * The cpu mask is set by ring index according to the ring flag,
+	 * which indicates whether the ring is tx or rx.
+ */
+ if (q_num == num_possible_cpus()) {
+ if (is_tx_ring(ring))
+ cpu = ring_idx;
+ else
+ cpu = ring_idx - q_num;
+ } else {
+ if (is_tx_ring(ring))
+ cpu = ring_idx * 2;
+ else
+ cpu = (ring_idx - q_num) * 2 + 1;
+ }
+
+ cpumask_clear(mask);
+ cpumask_set_cpu(cpu, mask);
+
+ return cpu;
+}
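+
+/* For illustration (values assumed): with q_num = 16 on a 16-core system,
+ * tx ring index 3 maps to cpu 3 and rx ring index 19 (rx queue 3) maps to
+ * cpu 19 - 16 = 3 as well, while on a 32-core system the same rings map to
+ * cpus 6 and 7, spreading tx onto even and rx onto odd cores.
+ */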
+
+static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < q_num * 2; i++) {
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ irq_set_affinity_hint(priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ priv->ring_data[i].ring->irq_init_flag =
+ RCB_IRQ_NOT_INITED;
+ }
+ }
+}
+
+static int hns_nic_init_irq(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ int i;
+ int ret;
+ int cpu;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+
+ if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
+ break;
+
+ snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
+ "%s-%s%d", priv->netdev->name,
+ (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
+
+ rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
+
+ irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
+ ret = request_irq(rd->ring->irq,
+ hns_irq_handle, 0, rd->ring->ring_name, rd);
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ rd->ring->irq);
+ goto out_free_irq;
+ }
+
+ cpu = hns_nic_init_affinity_mask(h->q_num, i,
+ rd->ring, &rd->mask);
+
+ if (cpu_online(cpu))
+ irq_set_affinity_hint(rd->ring->irq,
+ &rd->mask);
+
+ rd->ring->irq_init_flag = RCB_IRQ_INITED;
+ }
+
+ return 0;
+
+out_free_irq:
+ hns_nic_free_irq(h->q_num, priv);
+ return ret;
+}
+
+static int hns_nic_net_up(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j;
+ int ret;
+
+ if (!test_bit(NIC_STATE_DOWN, &priv->state))
+ return 0;
+
+ ret = hns_nic_init_irq(priv);
+ if (ret != 0) {
+ netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ ret = hns_nic_ring_open(ndev, i);
+ if (ret)
+ goto out_has_some_queues;
+ }
+
+ ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
+ if (ret)
+ goto out_set_mac_addr_err;
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ goto out_start_err;
+
+ if (ndev->phydev)
+ phy_start(ndev->phydev);
+
+ clear_bit(NIC_STATE_DOWN, &priv->state);
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ return 0;
+
+out_start_err:
+ netif_stop_queue(ndev);
+out_set_mac_addr_err:
+out_has_some_queues:
+ for (j = i - 1; j >= 0; j--)
+ hns_nic_ring_close(ndev, j);
+
+ hns_nic_free_irq(h->q_num, priv);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ return ret;
+}
+
+static void hns_nic_net_down(struct net_device *ndev)
+{
+ int i;
+ struct hnae_ae_ops *ops;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
+ return;
+
+ (void)del_timer_sync(&priv->service_timer);
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ netif_tx_disable(ndev);
+ priv->link = 0;
+
+ if (ndev->phydev)
+ phy_stop(ndev->phydev);
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
+ netif_tx_stop_all_queues(ndev);
+
+ for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
+ hns_nic_ring_close(ndev, i);
+ hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
+
+		/* clean tx buffers */
+ hns_nic_tx_clr_all_bufs(priv->ring_data + i);
+ }
+}
+
+void hns_nic_net_reset(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *handle = priv->ae_handle;
+
+ while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
+ usleep_range(1000, 2000);
+
+ (void)hnae_reinit_handle(handle);
+
+ clear_bit(NIC_STATE_RESETTING, &priv->state);
+}
+
+void hns_nic_net_reinit(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ enum hnae_port_type type = priv->ae_handle->port_type;
+
+ netif_trans_update(priv->netdev);
+ while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
+ usleep_range(1000, 2000);
+
+ hns_nic_net_down(netdev);
+
+ /* Only do hns_nic_net_reset in debug mode
+ * because of hardware limitation.
+ */
+ if (type == HNAE_PORT_DEBUG)
+ hns_nic_net_reset(netdev);
+
+ (void)hns_nic_net_up(netdev);
+ clear_bit(NIC_STATE_REINITING, &priv->state);
+}
+
+static int hns_nic_net_open(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ if (test_bit(NIC_STATE_TESTING, &priv->state))
+ return -EBUSY;
+
+ priv->link = 0;
+ netif_carrier_off(ndev);
+
+ ret = netif_set_real_num_tx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
+ ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, h->q_num);
+ if (ret < 0) {
+ netdev_err(ndev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ret = hns_nic_net_up(ndev);
+ if (ret) {
+ netdev_err(ndev,
+ "hns net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hns_nic_net_stop(struct net_device *ndev)
+{
+ hns_nic_net_down(ndev);
+
+ return 0;
+}
+
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
+#define HNS_TX_TIMEO_LIMIT (40 * HZ)
+static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
+ ndev->watchdog_timeo *= 2;
+ netdev_info(ndev, "watchdog_timo changed to %d.\n",
+ ndev->watchdog_timeo);
+ } else {
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ hns_tx_timeout_reset(priv);
+ }
+}
+
+static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ assert(skb->queue_mapping < ndev->ae_handle->q_num);
+
+ return hns_nic_net_xmit_hw(ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+}
+
+static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ dev_kfree_skb_any(skb);
+}
+
+#define HNS_LB_TX_RING 0
+static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
+{
+ struct sk_buff *skb;
+ struct ethhdr *ethhdr;
+ int frame_len;
+
+ /* allocate test skb */
+ skb = alloc_skb(64, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ skb_put(skb, 64);
+ skb->dev = ndev;
+ memset(skb->data, 0xFF, skb->len);
+
+	/* must be a tcp/ip packet */
+ ethhdr = (struct ethhdr *)skb->data;
+ ethhdr->h_proto = htons(ETH_P_IP);
+
+ frame_len = skb->len & (~1ul);
+ memset(&skb->data[frame_len / 2], 0xAA,
+ frame_len / 2 - 1);
+
+ skb->queue_mapping = HNS_LB_TX_RING;
+
+ return skb;
+}
+
+static int hns_enable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ int speed, duplex;
+ int ret;
+
+ ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
+ if (ret)
+ return ret;
+
+ ret = ops->start ? ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+	/* adjust link speed and duplex */
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ ops->adjust_link(h, speed, duplex);
+
+ /* wait h/w ready */
+ mdelay(300);
+
+ return 0;
+}
+
+static void hns_disable_serdes_lb(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ ops->stop(h);
+ ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
+}
+
+/**
+ *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has fetched.
+ *The function works as follows:
+ * 1. if an rx ring is found whose page_offset is not 0 between head and
+ * tail, the chip has fetched the wrong descs for a ring whose buffer
+ * size is 4096.
+ * 2. enable the chip serdes loopback and point the rss indirection at the ring.
+ * 3. construct 64-byte ip broadcast packets and wait until the associated
+ * rx ring has received them all; it will then fetch new descriptors.
+ * 4. recover to the original state.
+ *
+ *@ndev: net device
+ */
+static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ struct hns_nic_ring_data *rd;
+ struct hnae_ring *ring;
+ struct sk_buff *skb;
+ u32 *org_indir;
+ u32 *cur_indir;
+ int indir_size;
+ int head, tail;
+ int fetch_num;
+ int i, j;
+ bool found;
+ int retry_times;
+ int ret = 0;
+
+ /* alloc indir memory */
+ indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
+ org_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!org_indir)
+ return -ENOMEM;
+
+	/* store the original indirection table */
+ ops->get_rss(h, org_indir, NULL, NULL);
+
+ cur_indir = kzalloc(indir_size, GFP_KERNEL);
+ if (!cur_indir) {
+ ret = -ENOMEM;
+ goto cur_indir_alloc_err;
+ }
+
+ /* set loopback */
+ if (hns_enable_serdes_lb(ndev)) {
+ ret = -EINVAL;
+ goto enable_serdes_lb_err;
+ }
+
+	/* walk every rx ring to clear the fetched descs */
+ for (i = 0; i < h->q_num; i++) {
+ ring = &h->qs[i]->rx_ring;
+ head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
+ tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
+ found = false;
+ fetch_num = ring_dist(ring, head, tail);
+
+ while (head != tail) {
+ if (ring->desc_cb[head].page_offset != 0) {
+ found = true;
+ break;
+ }
+
+ head++;
+ if (head == ring->desc_num)
+ head = 0;
+ }
+
+ if (found) {
+ for (j = 0; j < indir_size / sizeof(*org_indir); j++)
+ cur_indir[j] = i;
+ ops->set_rss(h, cur_indir, NULL, 0);
+
+ for (j = 0; j < fetch_num; j++) {
+ /* alloc one skb and init */
+ skb = hns_assemble_skb(ndev);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ rd = &tx_ring_data(priv, skb->queue_mapping);
+ hns_nic_net_xmit_hw(ndev, skb, rd);
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+ /* clean rx */
+ rd = &rx_ring_data(priv, i);
+ if (rd->poll_one(rd, fetch_num,
+ hns_nic_drop_rx_fetch))
+ break;
+ }
+
+ retry_times = 0;
+ while (retry_times++ < 10) {
+ mdelay(10);
+					/* clean the packets sent on tx ring 0 */
+ rd = &tx_ring_data(priv,
+ HNS_LB_TX_RING);
+ if (rd->poll_one(rd, fetch_num, NULL))
+ break;
+ }
+ }
+ }
+ }
+
+out:
+ /* restore everything */
+ ops->set_rss(h, org_indir, NULL, 0);
+ hns_disable_serdes_lb(ndev);
+enable_serdes_lb_err:
+ kfree(cur_indir);
+cur_indir_alloc_err:
+ kfree(org_indir);
+
+ return ret;
+}
+
+static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ bool if_running = netif_running(ndev);
+ int ret;
+
+ /* MTU < 68 is an error and causes problems on some kernels */
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ /* MTU no change */
+ if (new_mtu == ndev->mtu)
+ return 0;
+
+ if (!h->dev->ops->set_mtu)
+ return -ENOTSUPP;
+
+ if (if_running) {
+ (void)hns_nic_net_stop(ndev);
+ msleep(100);
+ }
+
+ if (priv->enet_ver != AE_VERSION_1 &&
+ ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
+ new_mtu > BD_SIZE_2048_MAX_MTU) {
+ /* update desc */
+ hnae_reinit_all_ring_desc(h);
+
+		/* clear the packets which the chip has fetched */
+ ret = hns_nic_clear_all_rx_fetch(ndev);
+
+		/* the page offset must be consistent with the desc */
+ hnae_reinit_all_ring_page_off(h);
+
+ if (ret) {
+ netdev_err(ndev, "clear the fetched desc fail\n");
+ goto out;
+ }
+ }
+
+ ret = h->dev->ops->set_mtu(h, new_mtu);
+ if (ret) {
+ netdev_err(ndev, "set mtu fail, return value %d\n",
+ ret);
+ goto out;
+ }
+
+ /* finally, set new mtu to netdevice */
+ ndev->mtu = new_mtu;
+
+out:
+ if (if_running) {
+ if (hns_nic_net_open(ndev)) {
+ netdev_err(ndev, "hns net open fail\n");
+ ret = -EINVAL;
+ }
+ }
+
+ return ret;
+}
+
+static int hns_nic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (priv->enet_ver) {
+ case AE_VERSION_1:
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6))
+			netdev_info(netdev, "enet v1 does not support tso!\n");
+ break;
+ default:
+ if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
+ priv->ops.fill_desc = fill_tso_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+			/* The chip only supports 7*4096 */
+ netif_set_gso_max_size(netdev, 7 * 4096);
+ } else {
+ priv->ops.fill_desc = fill_v2_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ }
+ break;
+ }
+ netdev->features = features;
+ return 0;
+}
+
+static netdev_features_t hns_nic_fix_features(
+ struct net_device *netdev, netdev_features_t features)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (priv->enet_ver) {
+ case AE_VERSION_1:
+ features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
+ NETIF_F_HW_VLAN_CTAG_FILTER);
+ break;
+ default:
+ break;
+ }
+ return features;
+}
+
+static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->add_uc_addr)
+ return h->dev->ops->add_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns_nic_uc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->rm_uc_addr)
+ return h->dev->ops->rm_uc_addr(h, addr);
+
+ return 0;
+}
+
+/**
+ * hns_set_multicast_list - set multicast mac addresses
+ * @ndev: net device
+ *
+ * return void
+ */
+static void hns_set_multicast_list(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct netdev_hw_addr *ha = NULL;
+
+ if (!h) {
+ netdev_err(ndev, "hnae handle is null\n");
+ return;
+ }
+
+ if (h->dev->ops->clr_mc_addr)
+ if (h->dev->ops->clr_mc_addr(h))
+ netdev_err(ndev, "clear multicast address fail\n");
+
+ if (h->dev->ops->set_mc_addr) {
+ netdev_for_each_mc_addr(ha, ndev)
+ if (h->dev->ops->set_mc_addr(h, ha->addr))
+ netdev_err(ndev, "set multicast fail\n");
+ }
+}
+
+static void hns_nic_set_rx_mode(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->dev->ops->set_promisc_mode) {
+ if (ndev->flags & IFF_PROMISC)
+ h->dev->ops->set_promisc_mode(h, 1);
+ else
+ h->dev->ops->set_promisc_mode(h, 0);
+ }
+
+ hns_set_multicast_list(ndev);
+
+ if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
+ netdev_err(ndev, "sync uc address fail\n");
+}
+
+static void hns_nic_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ int idx = 0;
+ u64 tx_bytes = 0;
+ u64 rx_bytes = 0;
+ u64 tx_pkts = 0;
+ u64 rx_pkts = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ for (idx = 0; idx < h->q_num; idx++) {
+ tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
+ tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
+ rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
+ rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
+ }
+
+ stats->tx_bytes = tx_bytes;
+ stats->tx_packets = tx_pkts;
+ stats->rx_bytes = rx_bytes;
+ stats->rx_packets = rx_pkts;
+
+ stats->rx_errors = ndev->stats.rx_errors;
+ stats->multicast = ndev->stats.multicast;
+ stats->rx_length_errors = ndev->stats.rx_length_errors;
+ stats->rx_crc_errors = ndev->stats.rx_crc_errors;
+ stats->rx_missed_errors = ndev->stats.rx_missed_errors;
+
+ stats->tx_errors = ndev->stats.tx_errors;
+ stats->rx_dropped = ndev->stats.rx_dropped;
+ stats->tx_dropped = ndev->stats.tx_dropped;
+ stats->collisions = ndev->stats.collisions;
+ stats->rx_over_errors = ndev->stats.rx_over_errors;
+ stats->rx_frame_errors = ndev->stats.rx_frame_errors;
+ stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
+ stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
+ stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
+ stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
+ stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
+ stats->tx_window_errors = ndev->stats.tx_window_errors;
+ stats->rx_compressed = ndev->stats.rx_compressed;
+ stats->tx_compressed = ndev->stats.tx_compressed;
+}
+
+static u16
+hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ /* fix hardware broadcast/multicast packets queue loopback */
+ if (!AE_IS_VER1(priv->enet_ver) &&
+ is_multicast_ether_addr(eth_hdr->h_dest))
+ return 0;
+ else
+ return netdev_pick_tx(ndev, skb, NULL);
+}
+
+static const struct net_device_ops hns_nic_netdev_ops = {
+ .ndo_open = hns_nic_net_open,
+ .ndo_stop = hns_nic_net_stop,
+ .ndo_start_xmit = hns_nic_net_xmit,
+ .ndo_tx_timeout = hns_nic_net_timeout,
+ .ndo_set_mac_address = hns_nic_net_set_mac_address,
+ .ndo_change_mtu = hns_nic_change_mtu,
+ .ndo_do_ioctl = phy_do_ioctl_running,
+ .ndo_set_features = hns_nic_set_features,
+ .ndo_fix_features = hns_nic_fix_features,
+ .ndo_get_stats64 = hns_nic_get_stats64,
+ .ndo_set_rx_mode = hns_nic_set_rx_mode,
+ .ndo_select_queue = hns_nic_select_queue,
+};
+
+static void hns_nic_update_link_status(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (h->phy_dev) {
+ if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
+ return;
+
+ (void)genphy_read_status(h->phy_dev);
+ }
+ hns_nic_adjust_link(netdev);
+}
+
+/* for dumping key regs */
+static void hns_nic_dump(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+ u32 *data, reg_num, i;
+
+ if (ops->get_regs_len && ops->get_regs) {
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ reg_num = (reg_num + 3ul) & ~3ul;
+ data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
+ if (data) {
+ ops->get_regs(priv->ae_handle, data);
+ for (i = 0; i < reg_num; i += 4)
+ pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ i, data[i], data[i + 1],
+ data[i + 2], data[i + 3]);
+ kfree(data);
+ }
+ }
+
+ for (i = 0; i < h->q_num; i++) {
+ pr_info("tx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->tx_ring.next_to_clean);
+ pr_info("tx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->tx_ring.next_to_use);
+ pr_info("rx_queue%d_next_to_clean:%d\n",
+ i, h->qs[i]->rx_ring.next_to_clean);
+ pr_info("rx_queue%d_next_to_use:%d\n",
+ i, h->qs[i]->rx_ring.next_to_use);
+ }
+}
+
+/* for resetting subtask */
+static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
+{
+ enum hnae_port_type type = priv->ae_handle->port_type;
+
+ if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
+ return;
+ clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+
+ /* If we're already down, removing or resetting, just bail */
+ if (test_bit(NIC_STATE_DOWN, &priv->state) ||
+ test_bit(NIC_STATE_REMOVING, &priv->state) ||
+ test_bit(NIC_STATE_RESETTING, &priv->state))
+ return;
+
+ hns_nic_dump(priv);
+ netdev_info(priv->netdev, "try to reset %s port!\n",
+ (type == HNAE_PORT_DEBUG ? "debug" : "service"));
+
+ rtnl_lock();
+ /* put off any impending NetWatchDogTimeout */
+ netif_trans_update(priv->netdev);
+ hns_nic_net_reinit(priv->netdev);
+
+ rtnl_unlock();
+}
+
+/* for doing service complete*/
+static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
+{
+ WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
+ /* make sure to commit the things */
+ smp_mb__before_atomic();
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+}
+
+static void hns_nic_service_task(struct work_struct *work)
+{
+ struct hns_nic_priv *priv
+ = container_of(work, struct hns_nic_priv, service_task);
+ struct hnae_handle *h = priv->ae_handle;
+
+ hns_nic_reset_subtask(priv);
+ hns_nic_update_link_status(priv->netdev);
+ h->dev->ops->update_led_status(h);
+ hns_nic_update_stats(priv->netdev);
+
+ hns_nic_service_event_complete(priv);
+}
+
+static void hns_nic_task_schedule(struct hns_nic_priv *priv)
+{
+ if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
+ !test_bit(NIC_STATE_REMOVING, &priv->state) &&
+ !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
+ (void)schedule_work(&priv->service_task);
+}
+
+static void hns_nic_service_timer(struct timer_list *t)
+{
+ struct hns_nic_priv *priv = from_timer(priv, t, service_timer);
+
+ (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
+
+ hns_nic_task_schedule(priv);
+}
+
+/**
+ * hns_tx_timeout_reset - initiate reset due to Tx timeout
+ * @priv: driver private struct
+ **/
+static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
+{
+ /* Do the reset outside of interrupt context */
+ if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
+ set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
+ netdev_warn(priv->netdev,
+ "initiating reset due to tx timeout(%llu,0x%lx)\n",
+ priv->tx_timeout_count, priv->state);
+ priv->tx_timeout_count++;
+ hns_nic_task_schedule(priv);
+ }
+}
+
+static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ struct hns_nic_ring_data *rd;
+ bool is_ver1 = AE_IS_VER1(priv->enet_ver);
+ int i;
+
+ if (h->q_num > NIC_MAX_Q_PER_VF) {
+		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
+ return -EINVAL;
+ }
+
+ priv->ring_data = kzalloc(array3_size(h->q_num,
+ sizeof(*priv->ring_data), 2),
+ GFP_KERNEL);
+ if (!priv->ring_data)
+ return -ENOMEM;
+
+ for (i = 0; i < h->q_num; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i;
+ rd->ring = &h->qs[i]->tx_ring;
+ rd->poll_one = hns_nic_tx_poll_one;
+ rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
+ hns_nic_tx_fini_pro_v2;
+
+ netif_napi_add(priv->netdev, &rd->napi,
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ for (i = h->q_num; i < h->q_num * 2; i++) {
+ rd = &priv->ring_data[i];
+ rd->queue_index = i - h->q_num;
+ rd->ring = &h->qs[i - h->q_num]->rx_ring;
+ rd->poll_one = hns_nic_rx_poll_one;
+ rd->ex_process = hns_nic_rx_up_pro;
+ rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
+ hns_nic_rx_fini_pro_v2;
+
+ netif_napi_add(priv->netdev, &rd->napi,
+ hns_nic_common_poll, NAPI_POLL_WEIGHT);
+ rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+
+ return 0;
+}
+
+static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
+{
+ struct hnae_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->q_num * 2; i++) {
+ netif_napi_del(&priv->ring_data[i].napi);
+ if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
+ (void)irq_set_affinity_hint(
+ priv->ring_data[i].ring->irq,
+ NULL);
+ free_irq(priv->ring_data[i].ring->irq,
+ &priv->ring_data[i]);
+ }
+
+ priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
+ }
+ kfree(priv->ring_data);
+}
+
+static void hns_nic_set_priv_ops(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ priv->ops.fill_desc = fill_desc;
+ priv->ops.get_rxd_bnum = get_rx_desc_bnum;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ } else {
+ priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
+ if ((netdev->features & NETIF_F_TSO) ||
+ (netdev->features & NETIF_F_TSO6)) {
+ priv->ops.fill_desc = fill_tso_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
+			/* This chip only supports 7*4096 */
+ netif_set_gso_max_size(netdev, 7 * 4096);
+ } else {
+ priv->ops.fill_desc = fill_v2_desc;
+ priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
+ }
+		/* enable tso at init time;
+		 * tso on/off is controlled per bd through the TSE bit
+ */
+ h->dev->ops->set_tso_stats(h, 1);
+ }
+}
+
+static int hns_nic_try_get_ae(struct net_device *ndev)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h;
+ int ret;
+
+ h = hnae_get_handle(&priv->netdev->dev,
+ priv->fwnode, priv->port_id, NULL);
+ if (IS_ERR_OR_NULL(h)) {
+ ret = -ENODEV;
+		dev_dbg(priv->dev, "no handle yet, register notifier!\n");
+ goto out;
+ }
+ priv->ae_handle = h;
+
+ ret = hns_nic_init_phy(ndev, h);
+ if (ret) {
+ dev_err(priv->dev, "probe phy device fail!\n");
+ goto out_init_phy;
+ }
+
+ ret = hns_nic_init_ring_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_ring_data;
+ }
+
+ hns_nic_set_priv_ops(ndev);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_ndev_fail;
+ }
+ return 0;
+
+out_reg_ndev_fail:
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+out_init_phy:
+out_init_ring_data:
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+out:
+ return ret;
+}
+
+static int hns_nic_notifier_action(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct hns_nic_priv *priv =
+ container_of(nb, struct hns_nic_priv, notifier_block);
+
+ assert(action == HNAE_AE_REGISTER);
+
+ if (!hns_nic_try_get_ae(priv->netdev)) {
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+ }
+ return 0;
+}
+
+static int hns_nic_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *ndev;
+ struct hns_nic_priv *priv;
+ u32 port_id;
+ int ret;
+
+ ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ if (dev_of_node(dev)) {
+ struct device_node *ae_node;
+
+ if (of_device_is_compatible(dev->of_node,
+ "hisilicon,hns-nic-v1"))
+ priv->enet_ver = AE_VERSION_1;
+ else
+ priv->enet_ver = AE_VERSION_2;
+
+ ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
+ if (!ae_node) {
+ ret = -ENODEV;
+			dev_err(dev, "cannot find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = &ae_node->fwnode;
+ } else if (is_acpi_node(dev->fwnode)) {
+ struct fwnode_reference_args args;
+
+ if (acpi_dev_found(hns_enet_acpi_match[0].id))
+ priv->enet_ver = AE_VERSION_1;
+ else if (acpi_dev_found(hns_enet_acpi_match[1].id))
+ priv->enet_ver = AE_VERSION_2;
+ else {
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
+
+ /* try to find port-idx-in-ae first */
+ ret = acpi_node_get_property_reference(dev->fwnode,
+ "ae-handle", 0, &args);
+ if (ret) {
+			dev_err(dev, "cannot find ae-handle\n");
+ goto out_read_prop_fail;
+ }
+ if (!is_acpi_device_node(args.fwnode)) {
+ ret = -EINVAL;
+ goto out_read_prop_fail;
+ }
+ priv->fwnode = args.fwnode;
+ } else {
+ dev_err(dev, "cannot read cfg data from OF or acpi\n");
+ ret = -ENXIO;
+ goto out_read_prop_fail;
+ }
+
+ ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
+ if (ret) {
+		/* only for compatibility with old code */
+ ret = device_property_read_u32(dev, "port-id", &port_id);
+ if (ret)
+ goto out_read_prop_fail;
+		/* for old dts, we need to calculate the port offset */
+ port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
+ : port_id - HNS_SRV_OFFSET;
+ }
+ priv->port_id = port_id;
+
+ hns_init_mac_addr(ndev);
+
+ ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hns_nic_netdev_ops;
+ hns_ethtool_set_ops(ndev);
+
+ ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO;
+ ndev->vlan_features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+ ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;
+
+ /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
+ ndev->min_mtu = MAC_MIN_MTU;
+ switch (priv->enet_ver) {
+ case AE_VERSION_2:
+ ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
+ ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ ndev->max_mtu = MAC_MAX_MTU_V2 -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ break;
+ default:
+ ndev->max_mtu = MAC_MAX_MTU -
+ (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
+ break;
+ }
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
+ dev_dbg(dev, "set mask to 64bit\n");
+ else
+ dev_err(dev, "set mask to 64bit fail!\n");
+
+ /* carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(ndev);
+
+ timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
+ INIT_WORK(&priv->service_task, hns_nic_service_task);
+
+ set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
+ clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
+ set_bit(NIC_STATE_DOWN, &priv->state);
+
+ if (hns_nic_try_get_ae(priv->netdev)) {
+ priv->notifier_block.notifier_call = hns_nic_notifier_action;
+ ret = hnae_register_notifier(&priv->notifier_block);
+ if (ret) {
+ dev_err(dev, "register notifier fail!\n");
+ goto out_notify_fail;
+ }
+		dev_dbg(dev, "no handle yet, register notifier!\n");
+ }
+
+ return 0;
+
+out_notify_fail:
+ (void)cancel_work_sync(&priv->service_task);
+out_read_prop_fail:
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
+ free_netdev(ndev);
+ return ret;
+}
+
+static int hns_nic_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+
+ if (ndev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(ndev);
+
+ if (priv->ring_data)
+ hns_nic_uninit_ring_data(priv);
+ priv->ring_data = NULL;
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+
+ if (!IS_ERR_OR_NULL(priv->ae_handle))
+ hnae_put_handle(priv->ae_handle);
+ priv->ae_handle = NULL;
+ if (priv->notifier_block.notifier_call)
+ hnae_unregister_notifier(&priv->notifier_block);
+ priv->notifier_block.notifier_call = NULL;
+
+ set_bit(NIC_STATE_REMOVING, &priv->state);
+ (void)cancel_work_sync(&priv->service_task);
+
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
+
+ free_netdev(ndev);
+ return 0;
+}
+
+static const struct of_device_id hns_enet_of_match[] = {
+ {.compatible = "hisilicon,hns-nic-v1",},
+ {.compatible = "hisilicon,hns-nic-v2",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hns_enet_of_match);
+
+static struct platform_driver hns_nic_dev_driver = {
+ .driver = {
+ .name = "hns-nic",
+ .of_match_table = hns_enet_of_match,
+ .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
+ },
+ .probe = hns_nic_dev_probe,
+ .remove = hns_nic_dev_remove,
+};
+
+module_platform_driver(hns_nic_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
+MODULE_AUTHOR("Hisilicon, Inc.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.h b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
new file mode 100644
index 000000000..ffa9d6573
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#ifndef __HNS_ENET_H
+#define __HNS_ENET_H
+
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+
+#include "hnae.h"
+
+#define HNS_DEBUG_OFFSET 6
+#define HNS_SRV_OFFSET 2
+
+enum hns_nic_state {
+ NIC_STATE_TESTING = 0,
+ NIC_STATE_RESETTING,
+ NIC_STATE_REINITING,
+ NIC_STATE_DOWN,
+ NIC_STATE_DISABLED,
+ NIC_STATE_REMOVING,
+ NIC_STATE_SERVICE_INITED,
+ NIC_STATE_SERVICE_SCHED,
+ NIC_STATE2_RESET_REQUESTED,
+ NIC_STATE_MAX
+};
+
+struct hns_nic_ring_data {
+ struct hnae_ring *ring;
+ struct napi_struct napi;
+ cpumask_t mask; /* affinity mask */
+ u32 queue_index;
+ int (*poll_one)(struct hns_nic_ring_data *, int, void *);
+ void (*ex_process)(struct hns_nic_ring_data *, struct sk_buff *);
+ bool (*fini_process)(struct hns_nic_ring_data *);
+};
+
+/* ops that accommodate the differences between the two hardware versions */
+struct hns_nic_ops {
+ void (*fill_desc)(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu);
+ int (*maybe_stop_tx)(struct sk_buff **out_skb,
+ int *bnum, struct hnae_ring *ring);
+ void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);
+};
+
+struct hns_nic_priv {
+ const struct fwnode_handle *fwnode;
+ u32 enet_ver;
+ u32 port_id;
+ int phy_mode;
+ int phy_led_val;
+ struct net_device *netdev;
+ struct device *dev;
+ struct hnae_handle *ae_handle;
+
+ struct hns_nic_ops ops;
+
+	/* the cb for the nic to manage the ring buffers; the first half of
+	 * the array is for tx_ring and the second half for rx_ring
+ */
+ struct hns_nic_ring_data *ring_data;
+
+ /* The most recently read link state */
+ int link;
+ u64 tx_timeout_count;
+
+ unsigned long state;
+
+ struct timer_list service_timer;
+
+ struct work_struct service_task;
+
+ struct notifier_block notifier_block;
+};
+
+#define tx_ring_data(priv, idx) ((priv)->ring_data[idx])
+#define rx_ring_data(priv, idx) \
+ ((priv)->ring_data[(priv)->ae_handle->q_num + (idx)])
+
+void hns_ethtool_set_ops(struct net_device *ndev);
+void hns_nic_net_reset(struct net_device *ndev);
+void hns_nic_net_reinit(struct net_device *netdev);
+int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h);
+netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
+ struct sk_buff *skb,
+ struct hns_nic_ring_data *ring_data);
+
+#endif /* __HNS_ENET_H */
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
new file mode 100644
index 000000000..a6e3f07ca
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -0,0 +1,1304 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include "hns_enet.h"
+
+#define HNS_PHY_PAGE_MDIX 0
+#define HNS_PHY_PAGE_LED 3
+#define HNS_PHY_PAGE_COPPER 0
+
+#define HNS_PHY_PAGE_REG 22 /* Page Selection Reg. */
+#define HNS_PHY_CSC_REG 16 /* Copper Specific Control Register */
+#define HNS_PHY_CSS_REG 17 /* Copper Specific Status Register */
+#define HNS_LED_FC_REG 16 /* LED Function Control Reg. */
+#define HNS_LED_PC_REG 17 /* LED Polarity Control Reg. */
+
+#define HNS_LED_FORCE_ON 9
+#define HNS_LED_FORCE_OFF 8
+
+#define HNS_CHIP_VERSION 660
+#define HNS_NET_STATS_CNT 26
+
+#define PHY_MDIX_CTRL_S (5)
+#define PHY_MDIX_CTRL_M (3 << PHY_MDIX_CTRL_S)
+
+#define PHY_MDIX_STATUS_B (6)
+#define PHY_SPEED_DUP_RESOLVE_B (11)
+
+/**
+ * hns_nic_get_link - get current link status
+ * @net_dev: net_device
+ *
+ * Return: 1 if the link is up, 0 otherwise
+ */
+static u32 hns_nic_get_link(struct net_device *net_dev)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ u32 link_stat = priv->link;
+ struct hnae_handle *h;
+
+ h = priv->ae_handle;
+
+ if (net_dev->phydev) {
+ if (!genphy_read_status(net_dev->phydev))
+ link_stat = net_dev->phydev->link;
+ else
+ link_stat = 0;
+ }
+
+ if (h->dev && h->dev->ops && h->dev->ops->get_status)
+ link_stat = link_stat && h->dev->ops->get_status(h);
+ else
+ link_stat = 0;
+
+ return link_stat;
+}
+
+static void hns_get_mdix_mode(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ int mdix_ctrl, mdix, retval, is_resolved;
+ struct phy_device *phy_dev = net_dev->phydev;
+
+ if (!phy_dev || !phy_dev->mdio.bus) {
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ return;
+ }
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_MDIX);
+
+ retval = phy_read(phy_dev, HNS_PHY_CSC_REG);
+ mdix_ctrl = hnae_get_field(retval, PHY_MDIX_CTRL_M, PHY_MDIX_CTRL_S);
+
+ retval = phy_read(phy_dev, HNS_PHY_CSS_REG);
+ mdix = hnae_get_bit(retval, PHY_MDIX_STATUS_B);
+ is_resolved = hnae_get_bit(retval, PHY_SPEED_DUP_RESOLVE_B);
+
+ phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+
+ switch (mdix_ctrl) {
+ case 0x0:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI;
+ break;
+ case 0x1:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case 0x3:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ default:
+ cmd->base.eth_tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ break;
+ }
+
+ if (!is_resolved)
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_INVALID;
+ else if (mdix)
+ cmd->base.eth_tp_mdix = ETH_TP_MDI_X;
+ else
+ cmd->base.eth_tp_mdix = ETH_TP_MDI;
+}
+
+/**
+ * hns_nic_get_link_ksettings - implement ethtool get link ksettings
+ * @net_dev: net_device
+ * @cmd: ethtool_link_ksettings
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int hns_nic_get_link_ksettings(struct net_device *net_dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 link_stat;
+ int ret;
+ u8 duplex;
+ u16 speed;
+ u32 supported, advertising;
+
+ if (!priv || !priv->ae_handle)
+ return -ESRCH;
+
+ h = priv->ae_handle;
+ if (!h->dev || !h->dev->ops || !h->dev->ops->get_info)
+ return -ESRCH;
+
+ ret = h->dev->ops->get_info(h, NULL, &speed, &duplex);
+ if (ret < 0) {
+ netdev_err(net_dev, "%s get_info error!\n", __func__);
+ return -EINVAL;
+ }
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ /* When there is no phy, autoneg is off. */
+ cmd->base.autoneg = false;
+ cmd->base.speed = speed;
+ cmd->base.duplex = duplex;
+
+ if (net_dev->phydev)
+ phy_ethtool_ksettings_get(net_dev->phydev, cmd);
+
+ link_stat = hns_nic_get_link(net_dev);
+ if (!link_stat) {
+ cmd->base.speed = (u32)SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ if (cmd->base.autoneg)
+ advertising |= ADVERTISED_Autoneg;
+
+ supported |= h->if_support;
+ if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ supported |= SUPPORTED_TP;
+ advertising |= ADVERTISED_1000baseT_Full;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ supported |= SUPPORTED_FIBRE;
+ advertising |= ADVERTISED_10000baseKR_Full;
+ }
+
+ switch (h->media_type) {
+ case HNAE_MEDIA_TYPE_FIBER:
+ cmd->base.port = PORT_FIBRE;
+ break;
+ case HNAE_MEDIA_TYPE_COPPER:
+ cmd->base.port = PORT_TP;
+ break;
+ case HNAE_MEDIA_TYPE_UNKNOWN:
+ default:
+ break;
+ }
+
+ if (!(AE_IS_VER1(priv->enet_ver) && h->port_type == HNAE_PORT_DEBUG))
+ supported |= SUPPORTED_Pause;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C45 | ETH_MDIO_SUPPORTS_C22;
+ hns_get_mdix_mode(net_dev, cmd);
+
+ return 0;
+}
+
+/**
+ * hns_nic_set_link_ksettings - implement ethtool set link ksettings
+ * @net_dev: net_device
+ * @cmd: ethtool_link_ksettings
+ *
+ * Return: 0 on success, negative on failure
+ */
+static int hns_nic_set_link_ksettings(struct net_device *net_dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ u32 speed;
+
+ if (!netif_running(net_dev))
+ return -ESRCH;
+
+ if (!priv || !priv->ae_handle || !priv->ae_handle->dev ||
+ !priv->ae_handle->dev->ops)
+ return -ENODEV;
+
+ h = priv->ae_handle;
+ speed = cmd->base.speed;
+
+ if (h->phy_if == PHY_INTERFACE_MODE_XGMII) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE ||
+ speed != SPEED_10000 ||
+ cmd->base.duplex != DUPLEX_FULL)
+ return -EINVAL;
+ } else if (h->phy_if == PHY_INTERFACE_MODE_SGMII) {
+ if (!net_dev->phydev && cmd->base.autoneg == AUTONEG_ENABLE)
+ return -EINVAL;
+
+ if (speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+ if (net_dev->phydev)
+ return phy_ethtool_ksettings_set(net_dev->phydev, cmd);
+
+ if ((speed != SPEED_10 && speed != SPEED_100 &&
+ speed != SPEED_1000) || (cmd->base.duplex != DUPLEX_HALF &&
+ cmd->base.duplex != DUPLEX_FULL))
+ return -EINVAL;
+ } else {
+ netdev_err(net_dev, "Not supported!");
+ return -ENOTSUPP;
+ }
+
+ if (h->dev->ops->adjust_link) {
+ netif_carrier_off(net_dev);
+ h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+ netif_carrier_on(net_dev);
+ return 0;
+ }
+
+ netdev_err(net_dev, "Not supported!");
+ return -ENOTSUPP;
+}
+
+static const char hns_nic_test_strs[][ETH_GSTRING_LEN] = {
+ "Mac Loopback test",
+ "Serdes Loopback test",
+ "Phy Loopback test"
+};
+
+static int hns_nic_config_phy_loopback(struct phy_device *phy_dev, u8 en)
+{
+ int err;
+
+ if (en) {
+ /* Doing phy loopback in offline state, phy resuming is
+ * needed to power up the device.
+ */
+ err = phy_resume(phy_dev);
+ if (err)
+ goto out;
+
+ err = phy_loopback(phy_dev, true);
+ } else {
+ err = phy_loopback(phy_dev, false);
+ if (err)
+ goto out;
+
+ err = phy_suspend(phy_dev);
+ }
+
+out:
+ return err;
+}
+
+static int __lb_setup(struct net_device *ndev,
+ enum hnae_loop loop)
+{
+ int ret = 0;
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct phy_device *phy_dev = ndev->phydev;
+ struct hnae_handle *h = priv->ae_handle;
+
+ switch (loop) {
+ case MAC_INTERNALLOOP_PHY:
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x1);
+ if (!ret)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_INTERNALLOOP_MAC:
+ if ((h->dev->ops->set_loopback) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII))
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_INTERNALLOOP_SERDES:
+ if (h->dev->ops->set_loopback)
+ ret = h->dev->ops->set_loopback(h, loop, 0x1);
+ break;
+ case MAC_LOOP_PHY_NONE:
+ ret = hns_nic_config_phy_loopback(phy_dev, 0x0);
+ fallthrough;
+ case MAC_LOOP_NONE:
+ if (!ret && h->dev->ops->set_loopback) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ ret = h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_MAC, 0x0);
+
+ if (!ret)
+ ret = h->dev->ops->set_loopback(h,
+ MAC_INTERNALLOOP_SERDES, 0x0);
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ if (!ret) {
+ if (loop == MAC_LOOP_NONE)
+ h->dev->ops->set_promisc_mode(
+ h, ndev->flags & IFF_PROMISC);
+ else
+ h->dev->ops->set_promisc_mode(h, 1);
+ }
+ return ret;
+}
+
+static int __lb_up(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_WAIT_PHY_LINK_TIME 300
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int speed, duplex;
+ int ret;
+
+ hns_nic_net_reset(ndev);
+
+ ret = __lb_setup(ndev, loop_mode);
+ if (ret)
+ return ret;
+
+ msleep(200);
+
+ ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
+ if (ret)
+ return ret;
+
+ /* adjust the link speed and duplex */
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII)
+ speed = 1000;
+ else
+ speed = 10000;
+ duplex = 1;
+
+ h->dev->ops->adjust_link(h, speed, duplex);
+
+ /* wait for the link adjustment to complete and the PHY to be ready */
+ msleep(NIC_LB_TEST_WAIT_PHY_LINK_TIME);
+
+ return 0;
+}
+
+static void __lb_other_process(struct hns_nic_ring_data *ring_data,
+ struct sk_buff *skb)
+{
+ struct net_device *ndev;
+ struct hns_nic_priv *priv;
+ struct hnae_ring *ring;
+ struct netdev_queue *dev_queue;
+ struct sk_buff *new_skb;
+ unsigned int frame_size;
+ int check_ok;
+ u32 i;
+ char buff[33]; /* 32B data and the last character '\0' */
+
+ if (!ring_data) { /* just build the test frame */
+ ndev = skb->dev;
+ priv = netdev_priv(ndev);
+
+ frame_size = skb->len;
+ memset(skb->data, 0xFF, frame_size);
+ if ((!AE_IS_VER1(priv->enet_ver)) &&
+ (priv->ae_handle->port_type == HNAE_PORT_SERVICE)) {
+ memcpy(skb->data, ndev->dev_addr, 6);
+ skb->data[5] += 0x1f;
+ }
+
+ frame_size &= ~1ul;
+ memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+ memset(&skb->data[frame_size / 2 + 10], 0xBE,
+ frame_size / 2 - 11);
+ memset(&skb->data[frame_size / 2 + 12], 0xAF,
+ frame_size / 2 - 13);
+ return;
+ }
+
+ ring = ring_data->ring;
+ ndev = ring_data->napi.dev;
+ if (is_tx_ring(ring)) { /* reset the tx queue */
+ dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ return;
+ }
+
+ frame_size = skb->len;
+ frame_size &= ~1ul;
+ /* copy the skb to handle multi-buffer frames */
+ new_skb = skb_copy(skb, GFP_ATOMIC);
+ dev_kfree_skb_any(skb);
+ if (!new_skb) {
+ netdev_err(ndev, "skb alloc failed\n");
+ return;
+ }
+ skb = new_skb;
+
+ check_ok = 0;
+ if (*(skb->data + 10) == 0xFF) { /* check the received frame */
+ if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+ (*(skb->data + frame_size / 2 + 12) == 0xAF))
+ check_ok = 1;
+ }
+
+ if (check_ok) {
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += skb->len;
+ } else {
+ ndev->stats.rx_frame_errors++;
+ for (i = 0; i < skb->len; i++) {
+ snprintf(buff + i % 16 * 2, 3, /* trailing '\0' */
+ "%02x", *(skb->data + i));
+ if ((i % 16 == 15) || (i == skb->len - 1))
+ pr_info("%s\n", buff);
+ }
+ }
+ dev_kfree_skb_any(skb);
+}
+
+static int __lb_clean_rings(struct hns_nic_priv *priv,
+ int ringid0, int ringid1, int budget)
+{
+ int i, ret;
+ struct hns_nic_ring_data *ring_data;
+ struct net_device *ndev = priv->netdev;
+ unsigned long rx_packets = ndev->stats.rx_packets;
+ unsigned long rx_bytes = ndev->stats.rx_bytes;
+ unsigned long rx_frame_errors = ndev->stats.rx_frame_errors;
+
+ for (i = ringid0; i <= ringid1; i++) {
+ ring_data = &priv->ring_data[i];
+ (void)ring_data->poll_one(ring_data,
+ budget, __lb_other_process);
+ }
+ ret = (int)(ndev->stats.rx_packets - rx_packets);
+ ndev->stats.rx_packets = rx_packets;
+ ndev->stats.rx_bytes = rx_bytes;
+ ndev->stats.rx_frame_errors = rx_frame_errors;
+ return ret;
+}
+
+/**
+ * __lb_run_test - run loopback test
+ * @ndev: net device
+ * @loop_mode: loopback mode
+ */
+static int __lb_run_test(struct net_device *ndev,
+ enum hnae_loop loop_mode)
+{
+#define NIC_LB_TEST_PKT_NUM_PER_CYCLE 1
+#define NIC_LB_TEST_RING_ID 0
+#define NIC_LB_TEST_FRAME_SIZE 128
+/* nic loopback test err */
+#define NIC_LB_TEST_NO_MEM_ERR 1
+#define NIC_LB_TEST_TX_CNT_ERR 2
+#define NIC_LB_TEST_RX_CNT_ERR 3
+#define NIC_LB_TEST_RX_PKG_ERR 4
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int i, j, lc, good_cnt, ret_val = 0;
+ unsigned int size;
+ netdev_tx_t tx_ret_val;
+ struct sk_buff *skb;
+
+ size = NIC_LB_TEST_FRAME_SIZE;
+ /* allocate test skb */
+ skb = alloc_skb(size, GFP_KERNEL);
+ if (!skb)
+ return NIC_LB_TEST_NO_MEM_ERR;
+
+ /* place data into test skb */
+ (void)skb_put(skb, size);
+ skb->dev = ndev;
+ __lb_other_process(NULL, skb);
+ skb->queue_mapping = NIC_LB_TEST_RING_ID;
+
+ lc = 1;
+ for (j = 0; j < lc; j++) {
+ /* reset count of good packets */
+ good_cnt = 0;
+ /* place the test packets on the transmit queue */
+ for (i = 0; i < NIC_LB_TEST_PKT_NUM_PER_CYCLE; i++) {
+ (void)skb_get(skb);
+
+ tx_ret_val = (netdev_tx_t)hns_nic_net_xmit_hw(
+ ndev, skb,
+ &tx_ring_data(priv, skb->queue_mapping));
+ if (tx_ret_val == NETDEV_TX_OK)
+ good_cnt++;
+ else
+ break;
+ }
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_TX_CNT_ERR;
+ dev_err(priv->dev, "%s sent fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+
+ /* allow 100 milliseconds for packets to go from Tx to Rx */
+ msleep(100);
+
+ good_cnt = __lb_clean_rings(priv,
+ h->q_num, h->q_num * 2 - 1,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ if (good_cnt != NIC_LB_TEST_PKT_NUM_PER_CYCLE) {
+ ret_val = NIC_LB_TEST_RX_CNT_ERR;
+ dev_err(priv->dev, "%s recv fail, cnt=0x%x, budget=0x%x\n",
+ hns_nic_test_strs[loop_mode], good_cnt,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ break;
+ }
+ (void)__lb_clean_rings(priv,
+ NIC_LB_TEST_RING_ID, NIC_LB_TEST_RING_ID,
+ NIC_LB_TEST_PKT_NUM_PER_CYCLE);
+ }
+
+ /* free the original skb */
+ kfree_skb(skb);
+
+ return ret_val;
+}
+
+static int __lb_down(struct net_device *ndev, enum hnae_loop loop)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ struct hnae_handle *h = priv->ae_handle;
+ int ret;
+
+ if (loop == MAC_INTERNALLOOP_PHY)
+ ret = __lb_setup(ndev, MAC_LOOP_PHY_NONE);
+ else
+ ret = __lb_setup(ndev, MAC_LOOP_NONE);
+ if (ret)
+ netdev_err(ndev, "%s: __lb_setup return error(%d)!\n",
+ __func__,
+ ret);
+
+ if (h->dev->ops->stop)
+ h->dev->ops->stop(h);
+
+ usleep_range(10000, 20000);
+ (void)__lb_clean_rings(priv, 0, h->q_num - 1, 256);
+
+ hns_nic_net_reset(ndev);
+
+ return 0;
+}
+
+/**
+ * hns_nic_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns_nic_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(ndev);
+ bool if_running = netif_running(ndev);
+#define SELF_TEST_TYPE_NUM 3
+ int st_param[SELF_TEST_TYPE_NUM][2];
+ int i;
+ int test_index = 0;
+
+ st_param[0][0] = MAC_INTERNALLOOP_MAC; /* XGE does not support loopback */
+ st_param[0][1] = (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII);
+ st_param[1][0] = MAC_INTERNALLOOP_SERDES;
+ st_param[1][1] = 1; /* serdes must exist */
+ st_param[2][0] = MAC_INTERNALLOOP_PHY; /* only supported with a PHY node */
+ st_param[2][1] = ((!!(priv->ae_handle->phy_dev)) &&
+ (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII));
+
+ if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+ set_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ dev_close(ndev);
+
+ for (i = 0; i < SELF_TEST_TYPE_NUM; i++) {
+ if (!st_param[i][1])
+ continue; /* NEXT testing */
+
+ data[test_index] = __lb_up(ndev,
+ (enum hnae_loop)st_param[i][0]);
+ if (!data[test_index]) {
+ data[test_index] = __lb_run_test(
+ ndev, (enum hnae_loop)st_param[i][0]);
+ (void)__lb_down(ndev,
+ (enum hnae_loop)st_param[i][0]);
+ }
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+
+ hns_nic_net_reset(priv->netdev);
+
+ clear_bit(NIC_STATE_TESTING, &priv->state);
+
+ if (if_running)
+ (void)dev_open(ndev, NULL);
+ }
+ /* Online tests aren't run; pass by default */
+
+ (void)msleep_interruptible(4 * 1000);
+}
+
+/**
+ * hns_nic_get_drvinfo - get net driver info
+ * @net_dev: net device
+ * @drvinfo: driver info
+ */
+static void hns_nic_get_drvinfo(struct net_device *net_dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ strncpy(drvinfo->version, HNAE_DRIVER_VERSION,
+ sizeof(drvinfo->version));
+ drvinfo->version[sizeof(drvinfo->version) - 1] = '\0';
+
+ strncpy(drvinfo->driver, HNAE_DRIVER_NAME, sizeof(drvinfo->driver));
+ drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+ strncpy(drvinfo->bus_info, priv->dev->bus->name,
+ sizeof(drvinfo->bus_info));
+ drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+ strncpy(drvinfo->fw_version, "N/A", ETHTOOL_FWVERS_LEN);
+ drvinfo->eedump_len = 0;
+}
+
+/**
+ * hns_get_ringparam - get ring parameter
+ * @net_dev: net device
+ * @param: ethtool parameter
+ */
+static void hns_get_ringparam(struct net_device *net_dev,
+ struct ethtool_ringparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ struct hnae_queue *queue;
+ u32 uplimit = 0;
+
+ queue = priv->ae_handle->qs[0];
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_ring_bdnum_limit)
+ ops->get_ring_bdnum_limit(queue, &uplimit);
+
+ param->rx_max_pending = uplimit;
+ param->tx_max_pending = uplimit;
+ param->rx_pending = queue->rx_ring.desc_num;
+ param->tx_pending = queue->tx_ring.desc_num;
+}
+
+/**
+ * hns_get_pauseparam - get pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ */
+static void hns_get_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ops->get_pauseparam)
+ ops->get_pauseparam(priv->ae_handle, &param->autoneg,
+ &param->rx_pause, &param->tx_pause);
+}
+
+/**
+ * hns_set_pauseparam - set pause parameter
+ * @net_dev: net device
+ * @param: pause parameter
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_set_pauseparam(struct net_device *net_dev,
+ struct ethtool_pauseparam *param)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_handle *h;
+ struct hnae_ae_ops *ops;
+
+ h = priv->ae_handle;
+ ops = h->dev->ops;
+
+ if (!ops->set_pauseparam)
+ return -ESRCH;
+
+ return ops->set_pauseparam(priv->ae_handle, param->autoneg,
+ param->rx_pause, param->tx_pause);
+}
+
+/**
+ * hns_get_coalesce - get coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_get_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ ec->use_adaptive_rx_coalesce = priv->ae_handle->coal_adapt_en;
+ ec->use_adaptive_tx_coalesce = priv->ae_handle->coal_adapt_en;
+
+ if ((!ops->get_coalesce_usecs) ||
+ (!ops->get_max_coalesced_frames))
+ return -ESRCH;
+
+ ops->get_coalesce_usecs(priv->ae_handle,
+ &ec->tx_coalesce_usecs,
+ &ec->rx_coalesce_usecs);
+
+ ops->get_max_coalesced_frames(
+ priv->ae_handle,
+ &ec->tx_max_coalesced_frames,
+ &ec->rx_max_coalesced_frames);
+
+ ops->get_coalesce_range(priv->ae_handle,
+ &ec->tx_max_coalesced_frames_low,
+ &ec->rx_max_coalesced_frames_low,
+ &ec->tx_max_coalesced_frames_high,
+ &ec->rx_max_coalesced_frames_high,
+ &ec->tx_coalesce_usecs_low,
+ &ec->rx_coalesce_usecs_low,
+ &ec->tx_coalesce_usecs_high,
+ &ec->rx_coalesce_usecs_high);
+
+ return 0;
+}
+
+/**
+ * hns_set_coalesce - set coalesce info.
+ * @net_dev: net device
+ * @ec: coalesce info.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_set_coalesce(struct net_device *net_dev,
+ struct ethtool_coalesce *ec)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+ int rc1, rc2;
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (ec->tx_coalesce_usecs != ec->rx_coalesce_usecs)
+ return -EINVAL;
+
+ if ((!ops->set_coalesce_usecs) ||
+ (!ops->set_coalesce_frames))
+ return -ESRCH;
+
+ if (ec->use_adaptive_rx_coalesce != priv->ae_handle->coal_adapt_en)
+ priv->ae_handle->coal_adapt_en = ec->use_adaptive_rx_coalesce;
+
+ rc1 = ops->set_coalesce_usecs(priv->ae_handle,
+ ec->rx_coalesce_usecs);
+
+ rc2 = ops->set_coalesce_frames(priv->ae_handle,
+ ec->tx_max_coalesced_frames,
+ ec->rx_max_coalesced_frames);
+
+ if (rc1 || rc2)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * hns_get_channels - get channel info.
+ * @net_dev: net device
+ * @ch: channel info.
+ */
+static void
+hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+
+ ch->max_rx = priv->ae_handle->q_num;
+ ch->max_tx = priv->ae_handle->q_num;
+
+ ch->rx_count = priv->ae_handle->q_num;
+ ch->tx_count = priv->ae_handle->q_num;
+}
+
+/**
+ * hns_get_ethtool_stats - get detailed statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+static void hns_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u64 *p = data;
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ const struct rtnl_link_stats64 *net_stats;
+ struct rtnl_link_stats64 temp;
+
+ if (!h->dev->ops->get_stats || !h->dev->ops->update_stats) {
+ netdev_err(netdev, "get_stats or update_stats is null!\n");
+ return;
+ }
+
+ h->dev->ops->update_stats(h, &netdev->stats);
+
+ net_stats = dev_get_stats(netdev, &temp);
+
+ /* get netdev statistics */
+ p[0] = net_stats->rx_packets;
+ p[1] = net_stats->tx_packets;
+ p[2] = net_stats->rx_bytes;
+ p[3] = net_stats->tx_bytes;
+ p[4] = net_stats->rx_errors;
+ p[5] = net_stats->tx_errors;
+ p[6] = net_stats->rx_dropped;
+ p[7] = net_stats->tx_dropped;
+ p[8] = net_stats->multicast;
+ p[9] = net_stats->collisions;
+ p[10] = net_stats->rx_over_errors;
+ p[11] = net_stats->rx_crc_errors;
+ p[12] = net_stats->rx_frame_errors;
+ p[13] = net_stats->rx_fifo_errors;
+ p[14] = net_stats->rx_missed_errors;
+ p[15] = net_stats->tx_aborted_errors;
+ p[16] = net_stats->tx_carrier_errors;
+ p[17] = net_stats->tx_fifo_errors;
+ p[18] = net_stats->tx_heartbeat_errors;
+ p[19] = net_stats->rx_length_errors;
+ p[20] = net_stats->tx_window_errors;
+ p[21] = net_stats->rx_compressed;
+ p[22] = net_stats->tx_compressed;
+
+ p[23] = netdev->rx_dropped.counter;
+ p[24] = netdev->tx_dropped.counter;
+
+ p[25] = priv->tx_timeout_count;
+
+ /* get driver statistics */
+ h->dev->ops->get_stats(h, &p[26]);
+}
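+
+/* Note: the 26 netdev entries filled above (p[0]..p[25]) match
+ * HNS_NET_STATS_CNT, which is why hns_get_sset_count() returns
+ * HNS_NET_STATS_CNT plus the AE driver's own counter count for ETH_SS_STATS.
+ */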
+
+/**
+ * hns_get_strings - return a set of strings that describe the requested objects
+ * @netdev: net device
+ * @stringset: string set ID.
+ * @data: objects data.
+ */
+static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ char *buff = (char *)data;
+
+ if (!h->dev->ops->get_strings) {
+ netdev_err(netdev, "h->dev->ops->get_strings is null!\n");
+ return;
+ }
+
+ if (stringset == ETH_SS_TEST) {
+ if (priv->ae_handle->phy_if != PHY_INTERFACE_MODE_XGMII) {
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_MAC],
+ ETH_GSTRING_LEN);
+ buff += ETH_GSTRING_LEN;
+ }
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_SERDES],
+ ETH_GSTRING_LEN);
+ buff += ETH_GSTRING_LEN;
+ if ((netdev->phydev) && (!netdev->phydev->is_c45))
+ memcpy(buff, hns_nic_test_strs[MAC_INTERNALLOOP_PHY],
+ ETH_GSTRING_LEN);
+
+ } else {
+ snprintf(buff, ETH_GSTRING_LEN, "rx_packets");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_packets");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_bytes");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_bytes");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "multicast");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "collisions");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_over_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_crc_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_frame_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_fifo_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_missed_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_aborted_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_carrier_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_fifo_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_heartbeat_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_length_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_window_errors");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "rx_compressed");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "tx_compressed");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_rx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_dropped");
+ buff = buff + ETH_GSTRING_LEN;
+
+ snprintf(buff, ETH_GSTRING_LEN, "netdev_tx_timeout");
+ buff = buff + ETH_GSTRING_LEN;
+
+ h->dev->ops->get_strings(h, stringset, (u8 *)buff);
+ }
+}
+
+/**
+ * hns_get_sset_count - get string set count returned by nic_get_strings
+ * @netdev: net device
+ * @stringset: string set index, 0: self test string; 1: statistics string.
+ *
+ * Return string set count.
+ */
+static int hns_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct hnae_ae_ops *ops = h->dev->ops;
+
+ if (!ops->get_sset_count) {
+ netdev_err(netdev, "get_sset_count is null!\n");
+ return -EOPNOTSUPP;
+ }
+ if (stringset == ETH_SS_TEST) {
+ u32 cnt = (sizeof(hns_nic_test_strs) / ETH_GSTRING_LEN);
+
+ if (priv->ae_handle->phy_if == PHY_INTERFACE_MODE_XGMII)
+ cnt--;
+
+ if ((!netdev->phydev) || (netdev->phydev->is_c45))
+ cnt--;
+
+ return cnt;
+ } else if (stringset == ETH_SS_STATS) {
+ return (HNS_NET_STATS_CNT + ops->get_sset_count(h, stringset));
+ } else {
+ return -EOPNOTSUPP;
+ }
+}
+
+/**
+ * hns_phy_led_set - set phy LED status.
+ * @netdev: net device
+ * @value: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int hns_phy_led_set(struct net_device *netdev, int value)
+{
+ int retval;
+ struct phy_device *phy_dev = netdev->phydev;
+
+ retval = phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_LED);
+ retval |= phy_write(phy_dev, HNS_LED_FC_REG, value);
+ retval |= phy_write(phy_dev, HNS_PHY_PAGE_REG, HNS_PHY_PAGE_COPPER);
+ if (retval) {
+ netdev_err(netdev, "mdiobus_write fail !\n");
+ return retval;
+ }
+ return 0;
+}
+
+/**
+ * hns_set_phys_id - set phy identify LED.
+ * @netdev: net device
+ * @state: LED state.
+ *
+ * Return 0 on success, negative on failure.
+ */
+static int
+hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_handle *h = priv->ae_handle;
+ struct phy_device *phy_dev = netdev->phydev;
+ int ret;
+
+ if (phy_dev)
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ priv->phy_led_val = phy_read(phy_dev, HNS_LED_FC_REG);
+
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ return 2;
+ case ETHTOOL_ID_ON:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_ON);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_OFF:
+ ret = hns_phy_led_set(netdev, HNS_LED_FORCE_OFF);
+ if (ret)
+ return ret;
+ break;
+ case ETHTOOL_ID_INACTIVE:
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_LED);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phy_dev, HNS_LED_FC_REG,
+ priv->phy_led_val);
+ if (ret)
+ return ret;
+
+ ret = phy_write(phy_dev, HNS_PHY_PAGE_REG,
+ HNS_PHY_PAGE_COPPER);
+ if (ret)
+ return ret;
+ break;
+ default:
+ return -EINVAL;
+ }
+ else
+ switch (state) {
+ case ETHTOOL_ID_ACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ACTIVE);
+ case ETHTOOL_ID_ON:
+ return h->dev->ops->set_led_id(h, HNAE_LED_ON);
+ case ETHTOOL_ID_OFF:
+ return h->dev->ops->set_led_id(h, HNAE_LED_OFF);
+ case ETHTOOL_ID_INACTIVE:
+ return h->dev->ops->set_led_id(h, HNAE_LED_INACTIVE);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_get_regs - get net device register
+ * @net_dev: net device
+ * @cmd: ethtool cmd
+ * @data: register data
+ */
+static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd,
+ void *data)
+{
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+
+ cmd->version = HNS_CHIP_VERSION;
+ if (!ops->get_regs) {
+ netdev_err(net_dev, "ops->get_regs is null!\n");
+ return;
+ }
+ ops->get_regs(priv->ae_handle, data);
+}
+
+/**
+ * hns_get_regs_len - get total register len.
+ * @net_dev: net device
+ *
+ * Return total register len.
+ */
+static int hns_get_regs_len(struct net_device *net_dev)
+{
+ u32 reg_num;
+ struct hns_nic_priv *priv = netdev_priv(net_dev);
+ struct hnae_ae_ops *ops;
+
+ ops = priv->ae_handle->dev->ops;
+ if (!ops->get_regs_len) {
+ netdev_err(net_dev, "ops->get_regs_len is null!\n");
+ return -EOPNOTSUPP;
+ }
+
+ reg_num = ops->get_regs_len(priv->ae_handle);
+ if (reg_num > 0)
+ return reg_num * sizeof(u32);
+ else
+ return reg_num; /* error code */
+}
+
+/**
+ * hns_nic_nway_reset - nway reset
+ * @netdev: net device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_nic_nway_reset(struct net_device *netdev)
+{
+ struct phy_device *phy = netdev->phydev;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (!phy)
+ return -EOPNOTSUPP;
+
+ if (phy->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ return genphy_restart_aneg(phy);
+}
+
+static u32
+hns_get_rss_key_size(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return 0;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+ return ops->get_rss_key_size(priv->ae_handle);
+}
+
+static u32
+hns_get_rss_indir_size(struct net_device *netdev)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return 0;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+ return ops->get_rss_indir_size(priv->ae_handle);
+}
+
+static int
+hns_get_rss(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return -EOPNOTSUPP;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (!indir)
+ return 0;
+
+ return ops->get_rss(priv->ae_handle, indir, key, hfunc);
+}
+
+static int
+hns_set_rss(struct net_device *netdev, const u32 *indir, const u8 *key,
+ const u8 hfunc)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+ struct hnae_ae_ops *ops;
+
+ if (AE_IS_VER1(priv->enet_ver)) {
+ netdev_err(netdev,
+ "RSS feature is not supported on this hardware\n");
+ return -EOPNOTSUPP;
+ }
+
+ ops = priv->ae_handle->dev->ops;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) {
+ netdev_err(netdev, "Invalid hfunc!\n");
+ return -EOPNOTSUPP;
+ }
+
+ return ops->set_rss(priv->ae_handle, indir, key, hfunc);
+}
+
+static int hns_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hns_nic_priv *priv = netdev_priv(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = priv->ae_handle->q_num;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct ethtool_ops hns_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES |
+ ETHTOOL_COALESCE_USE_ADAPTIVE |
+ ETHTOOL_COALESCE_USECS_LOW_HIGH |
+ ETHTOOL_COALESCE_MAX_FRAMES_LOW_HIGH,
+ .get_drvinfo = hns_nic_get_drvinfo,
+ .get_link = hns_nic_get_link,
+ .get_ringparam = hns_get_ringparam,
+ .get_pauseparam = hns_get_pauseparam,
+ .set_pauseparam = hns_set_pauseparam,
+ .get_coalesce = hns_get_coalesce,
+ .set_coalesce = hns_set_coalesce,
+ .get_channels = hns_get_channels,
+ .self_test = hns_nic_self_test,
+ .get_strings = hns_get_strings,
+ .get_sset_count = hns_get_sset_count,
+ .get_ethtool_stats = hns_get_ethtool_stats,
+ .set_phys_id = hns_set_phys_id,
+ .get_regs_len = hns_get_regs_len,
+ .get_regs = hns_get_regs,
+ .nway_reset = hns_nic_nway_reset,
+ .get_rxfh_key_size = hns_get_rss_key_size,
+ .get_rxfh_indir_size = hns_get_rss_indir_size,
+ .get_rxfh = hns_get_rss,
+ .set_rxfh = hns_set_rss,
+ .get_rxnfc = hns_get_rxnfc,
+ .get_link_ksettings = hns_nic_get_link_ksettings,
+ .set_link_ksettings = hns_nic_set_link_ksettings,
+};
+
+void hns_ethtool_set_ops(struct net_device *ndev)
+{
+ ndev->ethtool_ops = &hns_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile
new file mode 100644
index 000000000..7aa2fac76
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/Makefile
@@ -0,0 +1,16 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y += -I$(srctree)/$(src)
+
+obj-$(CONFIG_HNS3) += hns3pf/
+obj-$(CONFIG_HNS3) += hns3vf/
+
+obj-$(CONFIG_HNS3) += hnae3.o
+
+obj-$(CONFIG_HNS3_ENET) += hns3.o
+hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o
+
+hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
new file mode 100644
index 000000000..98f55fbe6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h
@@ -0,0 +1,176 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_MBX_H
+#define __HCLGE_MBX_H
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+enum HCLGE_MBX_OPCODE {
+ HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */
+ HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */
+ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */
+ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */
+ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */
+ HCLGE_MBX_MAP_RING_TO_VECTOR, /* (VF -> PF) map ring-to-vector */
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR, /* (VF -> PF) unmap ring-to-vector */
+ HCLGE_MBX_SET_PROMISC_MODE, /* (VF -> PF) set promiscuous mode */
+ HCLGE_MBX_SET_MACVLAN, /* (VF -> PF) set unicast filter */
+ HCLGE_MBX_API_NEGOTIATE, /* (VF -> PF) negotiate API version */
+ HCLGE_MBX_GET_QINFO, /* (VF -> PF) get queue config */
+ HCLGE_MBX_GET_QDEPTH, /* (VF -> PF) get queue depth */
+ HCLGE_MBX_GET_TCINFO, /* (VF -> PF) get TC config */
+ HCLGE_MBX_GET_RETA, /* (VF -> PF) get RETA */
+ HCLGE_MBX_GET_RSS_KEY, /* (VF -> PF) get RSS key */
+ HCLGE_MBX_GET_MAC_ADDR, /* (VF -> PF) get MAC addr */
+ HCLGE_MBX_PF_VF_RESP, /* (PF -> VF) generate response to VF */
+ HCLGE_MBX_GET_BDNUM, /* (VF -> PF) get BD num */
+ HCLGE_MBX_GET_BUFSIZE, /* (VF -> PF) get buffer size */
+ HCLGE_MBX_GET_STREAMID, /* (VF -> PF) get stream id */
+ HCLGE_MBX_SET_AESTART, /* (VF -> PF) start ae */
+ HCLGE_MBX_SET_TSOSTATS, /* (VF -> PF) get tso stats */
+ HCLGE_MBX_LINK_STAT_CHANGE, /* (PF -> VF) link status has changed */
+ HCLGE_MBX_GET_BASE_CONFIG, /* (VF -> PF) get config */
+ HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */
+ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */
+ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */
+ HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */
+ HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */
+ HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */
+ HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */
+ HCLGE_MBX_LINK_STAT_MODE, /* (PF -> VF) link mode has changed */
+ HCLGE_MBX_GET_LINK_MODE, /* (VF -> PF) get the link mode of pf */
+ HCLGE_MBX_PUSH_VLAN_INFO, /* (PF -> VF) push port base vlan */
+ HCLGE_MBX_GET_MEDIA_TYPE, /* (VF -> PF) get media type */
+ HCLGE_MBX_PUSH_PROMISC_INFO, /* (PF -> VF) push vf promisc info */
+ HCLGE_MBX_VF_UNINIT, /* (VF -> PF) vf is uninitializing */
+ HCLGE_MBX_HANDLE_VF_TBL, /* (VF -> PF) store/clear hw table */
+
+ HCLGE_MBX_GET_VF_FLR_STATUS = 200, /* (M7 -> PF) get vf flr status */
+ HCLGE_MBX_PUSH_LINK_STATUS, /* (M7 -> PF) get port link status */
+ HCLGE_MBX_NCSI_ERROR, /* (M7 -> PF) receive a NCSI error */
+};
+
+/* below are per-VF mac-vlan subcodes */
+enum hclge_mbx_mac_vlan_subcode {
+ HCLGE_MBX_MAC_VLAN_UC_MODIFY = 0, /* modify UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_ADD, /* add a new UC mac addr */
+ HCLGE_MBX_MAC_VLAN_UC_REMOVE, /* remove a UC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_MODIFY, /* modify MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_ADD, /* add new MC mac addr */
+ HCLGE_MBX_MAC_VLAN_MC_REMOVE, /* remove MC mac addr */
+};
+
+/* below are per-VF vlan cfg subcodes */
+enum hclge_mbx_vlan_cfg_subcode {
+ HCLGE_MBX_VLAN_FILTER = 0, /* set vlan filter */
+ HCLGE_MBX_VLAN_TX_OFF_CFG, /* set tx side vlan offload */
+ HCLGE_MBX_VLAN_RX_OFF_CFG, /* set rx side vlan offload */
+ HCLGE_MBX_PORT_BASE_VLAN_CFG, /* set port based vlan configuration */
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE, /* get port based vlan state */
+};
+
+enum hclge_mbx_tbl_cfg_subcode {
+ HCLGE_MBX_VPORT_LIST_CLEAR,
+};
+
+#define HCLGE_MBX_MAX_MSG_SIZE 14
+#define HCLGE_MBX_MAX_RESP_DATA_SIZE 8U
+#define HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM 4
+
+struct hclge_ring_chain_param {
+ u8 ring_type;
+ u8 tqp_index;
+ u8 int_gl_index;
+};
+
+struct hclgevf_mbx_resp_status {
+ struct mutex mbx_mutex; /* protects against contending sync cmd resp */
+ u32 origin_mbx_msg;
+ bool received_resp;
+ int resp_status;
+ u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_respond_to_vf_msg {
+ int status;
+ u8 data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+ u16 len;
+};
+
+struct hclge_vf_to_pf_msg {
+ u8 code;
+ union {
+ struct {
+ u8 subcode;
+ u8 data[HCLGE_MBX_MAX_MSG_SIZE];
+ };
+ struct {
+ u8 en_bc;
+ u8 en_uc;
+ u8 en_mc;
+ };
+ struct {
+ u8 vector_id;
+ u8 ring_num;
+ struct hclge_ring_chain_param
+ param[HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM];
+ };
+ };
+};
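+
+/* Usage sketch: a typical VF request picks an opcode and, where the opcode
+ * defines one, a subcode plus payload, e.g. code = HCLGE_MBX_SET_VLAN with
+ * subcode = HCLGE_MBX_VLAN_FILTER and the vlan parameters packed into data[];
+ * the promiscuous and ring-map requests use the other union members instead.
+ */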
+
+struct hclge_pf_to_vf_msg {
+ u16 code;
+ u16 vf_mbx_msg_code;
+ u16 vf_mbx_msg_subcode;
+ u16 resp_status;
+ u8 resp_data[HCLGE_MBX_MAX_RESP_DATA_SIZE];
+};
+
+struct hclge_mbx_vf_to_pf_cmd {
+ u8 rsv;
+ u8 mbx_src_vfid; /* Auto filled by IMP */
+ u8 mbx_need_resp;
+ u8 rsv1[1];
+ u8 msg_len;
+ u8 rsv2;
+ u16 match_id;
+ struct hclge_vf_to_pf_msg msg;
+};
+
+#define HCLGE_MBX_NEED_RESP_B 0
+
+struct hclge_mbx_pf_to_vf_cmd {
+ u8 dest_vfid;
+ u8 rsv[3];
+ u8 msg_len;
+ u8 rsv1;
+ u16 match_id;
+ struct hclge_pf_to_vf_msg msg;
+};
+
+struct hclge_vf_rst_cmd {
+ u8 dest_vfid;
+ u8 vf_rst;
+ u8 rsv[22];
+};
+
+/* used by VF to store the received Async responses from PF */
+struct hclgevf_mbx_arq_ring {
+#define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8
+#define HCLGE_MBX_MAX_ARQ_MSG_NUM 1024
+ struct hclgevf_dev *hdev;
+ u32 head;
+ u32 tail;
+ atomic_t count;
+ u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
+};
+
+#define hclge_mbx_ring_ptr_move_crq(crq) \
+ (crq->next_to_use = (crq->next_to_use + 1) % crq->desc_num)
+#define hclge_mbx_tail_ptr_move_arq(arq) \
+ (arq.tail = (arq.tail + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
+#define hclge_mbx_head_ptr_move_arq(arq) \
+ (arq.head = (arq.head + 1) % HCLGE_MBX_MAX_ARQ_MSG_NUM)
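+
+/* For example, with HCLGE_MBX_MAX_ARQ_MSG_NUM = 1024, advancing the tail from
+ * 1023 wraps it back to 0 ((1023 + 1) % 1024 == 0), so head and tail always
+ * stay within the bounds of msg_q[].
+ */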
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.c b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
new file mode 100644
index 000000000..67b0bf310
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.c
@@ -0,0 +1,392 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+
+#include "hnae3.h"
+
+static LIST_HEAD(hnae3_ae_algo_list);
+static LIST_HEAD(hnae3_client_list);
+static LIST_HEAD(hnae3_ae_dev_list);
+
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *pci_id;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!ae_algo)
+ return;
+
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ pci_id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!pci_id)
+ continue;
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ pci_disable_sriov(ae_dev->pdev);
+ }
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo_prepare);
+
+/* We are keeping things simple and using a single lock for all the
+ * lists. This is non-critical code, so other updates, if they happen
+ * in parallel, can wait.
+ */
+static DEFINE_MUTEX(hnae3_common_lock);
+
+static bool hnae3_client_match(enum hnae3_client_type client_type)
+{
+ if (client_type == HNAE3_CLIENT_KNIC ||
+ client_type == HNAE3_CLIENT_ROCE)
+ return true;
+
+ return false;
+}
+
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited)
+{
+ if (!client || !ae_dev)
+ return;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hnae3_set_bit(ae_dev->flag, HNAE3_KNIC_CLIENT_INITED_B, inited);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ hnae3_set_bit(ae_dev->flag, HNAE3_ROCE_CLIENT_INITED_B, inited);
+ break;
+ default:
+ break;
+ }
+}
+EXPORT_SYMBOL(hnae3_set_client_init_flag);
+
+static int hnae3_get_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ int inited = 0;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_KNIC_CLIENT_INITED_B);
+ break;
+ case HNAE3_CLIENT_ROCE:
+ inited = hnae3_get_bit(ae_dev->flag,
+ HNAE3_ROCE_CLIENT_INITED_B);
+ break;
+ default:
+ break;
+ }
+
+ return inited;
+}
+
+static int hnae3_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ int ret;
+
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))) {
+ return 0;
+ }
+
+ ret = ae_dev->ops->init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "fail to instantiate client, ret = %d\n", ret);
+
+ return ret;
+}
+
+static void hnae3_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ /* check if this client matches the type of ae_dev */
+ if (!(hnae3_client_match(client->type) &&
+ hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B)))
+ return;
+
+ if (hnae3_get_client_init_flag(client, ae_dev)) {
+ ae_dev->ops->uninit_client_instance(client, ae_dev);
+
+ hnae3_set_client_init_flag(client, ae_dev, 0);
+ }
+}
+
+int hnae3_register_client(struct hnae3_client *client)
+{
+ struct hnae3_client *client_tmp;
+ struct hnae3_ae_dev *ae_dev;
+
+ if (!client)
+ return -ENODEV;
+
+ mutex_lock(&hnae3_common_lock);
+ /* one system should only have one client for every type */
+ list_for_each_entry(client_tmp, &hnae3_client_list, node) {
+ if (client_tmp->type == client->type)
+ goto exit;
+ }
+
+ list_add_tail(&client->node, &hnae3_client_list);
+
+ /* initialize the client on every matched port */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ /* if the client could not be initialized on the current port,
+ * for any error reason, move on to the next available port
+ */
+ int ret = hnae3_init_client_instance(client, ae_dev);
+
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed for port, ret = %d\n",
+ ret);
+ }
+
+exit:
+ mutex_unlock(&hnae3_common_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(hnae3_register_client);
+
+void hnae3_unregister_client(struct hnae3_client *client)
+{
+ struct hnae3_client *client_tmp;
+ struct hnae3_ae_dev *ae_dev;
+ bool existed = false;
+
+ if (!client)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+ /* one system should only have one client for every type */
+ list_for_each_entry(client_tmp, &hnae3_client_list, node) {
+ if (client_tmp->type == client->type) {
+ existed = true;
+ break;
+ }
+ }
+
+ if (!existed) {
+ mutex_unlock(&hnae3_common_lock);
+ pr_err("client %s does not exist!\n", client->name);
+ return;
+ }
+
+ /* un-initialize the client on every matched port */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ hnae3_uninit_client_instance(client, ae_dev);
+ }
+
+ list_del(&client->node);
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_unregister_client);
+
+/* hnae3_register_ae_algo - register an AE algorithm to the hnae3 framework
+ * @ae_algo: AE algorithm
+ * NOTE: the duplicated name will not be checked
+ */
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_dev *ae_dev;
+ struct hnae3_client *client;
+ int ret;
+
+ if (!ae_algo)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+
+ list_add_tail(&ae_algo->node, &hnae3_ae_algo_list);
+
+ /* Check if this algo/ops matches the list of ae_devs */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ if (!ae_algo->ops) {
+ dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+ continue;
+ }
+ ae_dev->ops = ae_algo->ops;
+
+ ret = ae_algo->ops->init_ae_dev(ae_dev);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
+ continue;
+ }
+
+ /* ae_dev init should set flag */
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+
+ /* check the client list for a match with this ae_dev type and
+ * initialize the matching client instances
+ */
+ list_for_each_entry(client, &hnae3_client_list, node) {
+ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed, ret = %d\n",
+ ret);
+ }
+ }
+
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_register_ae_algo);
+
+/* hnae3_unregister_ae_algo - unregisters an AE algorithm
+ * @ae_algo: the AE algorithm to unregister
+ */
+void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_dev *ae_dev;
+ struct hnae3_client *client;
+
+ if (!ae_algo)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+ /* Check if there are matched ae_dev */
+ list_for_each_entry(ae_dev, &hnae3_ae_dev_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ /* check the client list for a match with this ae_dev type and
+ * un-initialize the matching client instances
+ */
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_uninit_client_instance(client, ae_dev);
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
+ }
+
+ list_del(&ae_algo->node);
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_algo);
+
+/* hnae3_register_ae_dev - registers an AE device to the hnae3 framework
+ * @ae_dev: the AE device
+ * NOTE: the duplicated name will not be checked
+ */
+int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_algo *ae_algo;
+ struct hnae3_client *client;
+ int ret;
+
+ if (!ae_dev)
+ return -ENODEV;
+
+ mutex_lock(&hnae3_common_lock);
+
+ list_add_tail(&ae_dev->node, &hnae3_ae_dev_list);
+
+ /* Check if there are matched ae_algo */
+ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ if (!ae_algo->ops) {
+ dev_err(&ae_dev->pdev->dev, "ae_algo ops are null\n");
+ ret = -EOPNOTSUPP;
+ goto out_err;
+ }
+ ae_dev->ops = ae_algo->ops;
+
+ ret = ae_dev->ops->init_ae_dev(ae_dev);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "init ae_dev error, ret = %d\n", ret);
+ goto out_err;
+ }
+
+ /* ae_dev init should set flag */
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 1);
+ break;
+ }
+
+ /* check the client list for a match with this ae_dev type and
+ * initialize the matching client instances
+ */
+ list_for_each_entry(client, &hnae3_client_list, node) {
+ ret = hnae3_init_client_instance(client, ae_dev);
+ if (ret)
+ dev_err(&ae_dev->pdev->dev,
+ "match and instantiation failed, ret = %d\n",
+ ret);
+ }
+
+ mutex_unlock(&hnae3_common_lock);
+
+ return 0;
+
+out_err:
+ list_del(&ae_dev->node);
+ mutex_unlock(&hnae3_common_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(hnae3_register_ae_dev);
+
+/* hnae3_unregister_ae_dev - unregisters an AE device
+ * @ae_dev: the AE device to unregister
+ */
+void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ const struct pci_device_id *id;
+ struct hnae3_ae_algo *ae_algo;
+ struct hnae3_client *client;
+
+ if (!ae_dev)
+ return;
+
+ mutex_lock(&hnae3_common_lock);
+ /* Check if there are matched ae_algo */
+ list_for_each_entry(ae_algo, &hnae3_ae_algo_list, node) {
+ if (!hnae3_get_bit(ae_dev->flag, HNAE3_DEV_INITED_B))
+ continue;
+
+ id = pci_match_id(ae_algo->pdev_id_table, ae_dev->pdev);
+ if (!id)
+ continue;
+
+ list_for_each_entry(client, &hnae3_client_list, node)
+ hnae3_uninit_client_instance(client, ae_dev);
+
+ ae_algo->ops->uninit_ae_dev(ae_dev);
+ hnae3_set_bit(ae_dev->flag, HNAE3_DEV_INITED_B, 0);
+ ae_dev->ops = NULL;
+ }
+
+ list_del(&ae_dev->node);
+ mutex_unlock(&hnae3_common_lock);
+}
+EXPORT_SYMBOL(hnae3_unregister_ae_dev);
+
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HNAE3(Hisilicon Network Acceleration Engine) Framework");
+MODULE_VERSION(HNAE3_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
new file mode 100644
index 000000000..4a9576a44
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h
@@ -0,0 +1,767 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HNAE3_H
+#define __HNAE3_H
+
+/* Names used in this framework:
+ * ae handle (handle):
+ * a set of queues provided by AE
+ * ring buffer queue (rbq):
+ * the channel between upper layer and the AE, can do tx and rx
+ * ring:
+ * a tx or rx channel within a rbq
+ * ring description (desc):
+ * an element in the ring with packet information
+ * buffer:
+ * a memory region referred by desc with the full packet payload
+ *
+ * "num" means a static number set as a parameter, "count" mean a dynamic
+ * number set while running
+ * "cb" means control block
+ */
+
+#include <linux/acpi.h>
+#include <linux/dcbnl.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/types.h>
+
+#define HNAE3_MOD_VERSION "1.0"
+
+#define HNAE3_MIN_VECTOR_NUM 2 /* first one for misc, another for IO */
+
+/* Device version */
+#define HNAE3_DEVICE_VERSION_V1 0x00020
+#define HNAE3_DEVICE_VERSION_V2 0x00021
+#define HNAE3_DEVICE_VERSION_V3 0x00030
+
+#define HNAE3_PCI_REVISION_BIT_SIZE 8
+
+/* Device IDs */
+#define HNAE3_DEV_ID_GE 0xA220
+#define HNAE3_DEV_ID_25GE 0xA221
+#define HNAE3_DEV_ID_25GE_RDMA 0xA222
+#define HNAE3_DEV_ID_25GE_RDMA_MACSEC 0xA223
+#define HNAE3_DEV_ID_50GE_RDMA 0xA224
+#define HNAE3_DEV_ID_50GE_RDMA_MACSEC 0xA225
+#define HNAE3_DEV_ID_100G_RDMA_MACSEC 0xA226
+#define HNAE3_DEV_ID_200G_RDMA 0xA228
+#define HNAE3_DEV_ID_VF 0xA22E
+#define HNAE3_DEV_ID_RDMA_DCB_PFC_VF 0xA22F
+
+#define HNAE3_CLASS_NAME_SIZE 16
+
+#define HNAE3_DEV_INITED_B 0x0
+#define HNAE3_DEV_SUPPORT_ROCE_B 0x1
+#define HNAE3_DEV_SUPPORT_DCB_B 0x2
+#define HNAE3_KNIC_CLIENT_INITED_B 0x3
+#define HNAE3_UNIC_CLIENT_INITED_B 0x4
+#define HNAE3_ROCE_CLIENT_INITED_B 0x5
+
+#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
+ BIT(HNAE3_DEV_SUPPORT_ROCE_B))
+
+#define hnae3_dev_roce_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)
+
+#define hnae3_dev_dcb_supported(hdev) \
+ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_DCB_B)
+
+enum HNAE3_DEV_CAP_BITS {
+ HNAE3_DEV_SUPPORT_FD_B,
+ HNAE3_DEV_SUPPORT_GRO_B,
+ HNAE3_DEV_SUPPORT_FEC_B,
+ HNAE3_DEV_SUPPORT_UDP_GSO_B,
+ HNAE3_DEV_SUPPORT_QB_B,
+ HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B,
+ HNAE3_DEV_SUPPORT_PTP_B,
+ HNAE3_DEV_SUPPORT_INT_QL_B,
+ HNAE3_DEV_SUPPORT_SIMPLE_BD_B,
+ HNAE3_DEV_SUPPORT_TX_PUSH_B,
+ HNAE3_DEV_SUPPORT_PHY_IMP_B,
+ HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B,
+ HNAE3_DEV_SUPPORT_HW_PAD_B,
+ HNAE3_DEV_SUPPORT_STASH_B,
+};
+
+#define hnae3_dev_fd_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_gro_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fec_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_udp_gso_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_qb_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_QB_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_fd_forward_tc_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_ptp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_int_ql_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_simple_bd_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_SIMPLE_BD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tx_push_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_phy_imp_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_tqp_txrx_indep_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_hw_pad_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_HW_PAD_B, (hdev)->ae_dev->caps)
+
+#define hnae3_dev_stash_supported(hdev) \
+ test_bit(HNAE3_DEV_SUPPORT_STASH_B, (hdev)->ae_dev->caps)
+
+#define hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev) \
+ test_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, (ae_dev)->caps)
+
+#define ring_ptr_move_fw(ring, p) \
+ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
+#define ring_ptr_move_bw(ring, p) \
+ ((ring)->p = ((ring)->p - 1 + (ring)->desc_num) % (ring)->desc_num)
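+
+/* For example, on a ring with desc_num = 1024, ring_ptr_move_fw() steps a
+ * ring pointer field (such as a next-to-use index) from 1023 back to 0, and
+ * ring_ptr_move_bw() steps it from 0 back to 1023, keeping it in [0, desc_num).
+ */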
+
+enum hns_desc_type {
+ DESC_TYPE_UNKNOWN,
+ DESC_TYPE_SKB,
+ DESC_TYPE_FRAGLIST_SKB,
+ DESC_TYPE_PAGE,
+};
+
+struct hnae3_handle;
+
+struct hnae3_queue {
+ void __iomem *io_base;
+ struct hnae3_ae_algo *ae_algo;
+ struct hnae3_handle *handle;
+ int tqp_index; /* index in a handle */
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 tx_desc_num; /* total number of tx desc */
+ u16 rx_desc_num; /* total number of rx desc */
+};
+
+struct hns3_mac_stats {
+ u64 tx_pause_cnt;
+ u64 rx_pause_cnt;
+};
+
+/* hnae3 loop mode */
+enum hnae3_loop {
+ HNAE3_LOOP_APP,
+ HNAE3_LOOP_SERIAL_SERDES,
+ HNAE3_LOOP_PARALLEL_SERDES,
+ HNAE3_LOOP_PHY,
+ HNAE3_LOOP_NONE,
+};
+
+enum hnae3_client_type {
+ HNAE3_CLIENT_KNIC,
+ HNAE3_CLIENT_ROCE,
+};
+
+/* mac media type */
+enum hnae3_media_type {
+ HNAE3_MEDIA_TYPE_UNKNOWN,
+ HNAE3_MEDIA_TYPE_FIBER,
+ HNAE3_MEDIA_TYPE_COPPER,
+ HNAE3_MEDIA_TYPE_BACKPLANE,
+ HNAE3_MEDIA_TYPE_NONE,
+};
+
+/* must be consistent with definition in firmware */
+enum hnae3_module_type {
+ HNAE3_MODULE_TYPE_UNKNOWN = 0x00,
+ HNAE3_MODULE_TYPE_FIBRE_LR = 0x01,
+ HNAE3_MODULE_TYPE_FIBRE_SR = 0x02,
+ HNAE3_MODULE_TYPE_AOC = 0x03,
+ HNAE3_MODULE_TYPE_CR = 0x04,
+ HNAE3_MODULE_TYPE_KR = 0x05,
+ HNAE3_MODULE_TYPE_TP = 0x06,
+};
+
+enum hnae3_fec_mode {
+ HNAE3_FEC_AUTO = 0,
+ HNAE3_FEC_BASER,
+ HNAE3_FEC_RS,
+ HNAE3_FEC_USER_DEF,
+};
+
+enum hnae3_reset_notify_type {
+ HNAE3_UP_CLIENT,
+ HNAE3_DOWN_CLIENT,
+ HNAE3_INIT_CLIENT,
+ HNAE3_UNINIT_CLIENT,
+};
+
+enum hnae3_hw_error_type {
+ HNAE3_PPU_POISON_ERROR,
+ HNAE3_CMDQ_ECC_ERROR,
+ HNAE3_IMP_RD_POISON_ERROR,
+ HNAE3_ROCEE_AXI_RESP_ERROR,
+};
+
+enum hnae3_reset_type {
+ HNAE3_VF_RESET,
+ HNAE3_VF_FUNC_RESET,
+ HNAE3_VF_PF_FUNC_RESET,
+ HNAE3_VF_FULL_RESET,
+ HNAE3_FLR_RESET,
+ HNAE3_FUNC_RESET,
+ HNAE3_GLOBAL_RESET,
+ HNAE3_IMP_RESET,
+ HNAE3_UNKNOWN_RESET,
+ HNAE3_NONE_RESET,
+ HNAE3_MAX_RESET,
+};
+
+enum hnae3_port_base_vlan_state {
+ HNAE3_PORT_BASE_VLAN_DISABLE,
+ HNAE3_PORT_BASE_VLAN_ENABLE,
+ HNAE3_PORT_BASE_VLAN_MODIFY,
+ HNAE3_PORT_BASE_VLAN_NOCHANGE,
+};
+
+struct hnae3_vector_info {
+ u8 __iomem *io_addr;
+ int vector;
+};
+
+#define HNAE3_RING_TYPE_B 0
+#define HNAE3_RING_TYPE_TX 0
+#define HNAE3_RING_TYPE_RX 1
+#define HNAE3_RING_GL_IDX_S 0
+#define HNAE3_RING_GL_IDX_M GENMASK(1, 0)
+#define HNAE3_RING_GL_RX 0
+#define HNAE3_RING_GL_TX 1
+
+#define HNAE3_FW_VERSION_BYTE3_SHIFT 24
+#define HNAE3_FW_VERSION_BYTE3_MASK GENMASK(31, 24)
+#define HNAE3_FW_VERSION_BYTE2_SHIFT 16
+#define HNAE3_FW_VERSION_BYTE2_MASK GENMASK(23, 16)
+#define HNAE3_FW_VERSION_BYTE1_SHIFT 8
+#define HNAE3_FW_VERSION_BYTE1_MASK GENMASK(15, 8)
+#define HNAE3_FW_VERSION_BYTE0_SHIFT 0
+#define HNAE3_FW_VERSION_BYTE0_MASK GENMASK(7, 0)
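+
+/* Illustrative note (not part of the original patch): the firmware version
+ * reported by get_fw_version() packs one component per byte, so a component
+ * can be extracted with the hnae3_get_field() helper defined later in this
+ * header, e.g.:
+ *
+ *	byte3 = hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ *				HNAE3_FW_VERSION_BYTE3_SHIFT);
+ *
+ * where "fw_version" and "byte3" are assumed local u32 variables.
+ */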
+
+struct hnae3_ring_chain_node {
+ struct hnae3_ring_chain_node *next;
+ u32 tqp_index;
+ u32 flag;
+ u32 int_gl_idx;
+};
+
+#define HNAE3_IS_TX_RING(node) \
+ (((node)->flag & (1 << HNAE3_RING_TYPE_B)) == HNAE3_RING_TYPE_TX)
+
+/* device specification info from firmware */
+struct hnae3_dev_specs {
+ u32 mac_entry_num; /* number of mac-vlan table entries */
+ u32 mng_entry_num; /* number of manager table entries */
+ u32 max_tm_rate;
+ u16 rss_ind_tbl_size;
+ u16 rss_key_size;
+ u16 int_ql_max; /* max value of interrupt coalesce based on INT_QL */
+ u8 max_non_tso_bd_num; /* max BD number of one non-TSO packet */
+};
+
+struct hnae3_client_ops {
+ int (*init_instance)(struct hnae3_handle *handle);
+ void (*uninit_instance)(struct hnae3_handle *handle, bool reset);
+ void (*link_status_change)(struct hnae3_handle *handle, bool state);
+ int (*setup_tc)(struct hnae3_handle *handle, u8 tc);
+ int (*reset_notify)(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type);
+ void (*process_hw_error)(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type);
+};
+
+#define HNAE3_CLIENT_NAME_LENGTH 16
+struct hnae3_client {
+ char name[HNAE3_CLIENT_NAME_LENGTH];
+ unsigned long state;
+ enum hnae3_client_type type;
+ const struct hnae3_client_ops *ops;
+ struct list_head node;
+};
+
+#define HNAE3_DEV_CAPS_MAX_NUM 96
+struct hnae3_ae_dev {
+ struct pci_dev *pdev;
+ const struct hnae3_ae_ops *ops;
+ struct list_head node;
+ u32 flag;
+ unsigned long hw_err_reset_req;
+ struct hnae3_dev_specs dev_specs;
+ u32 dev_version;
+ unsigned long caps[BITS_TO_LONGS(HNAE3_DEV_CAPS_MAX_NUM)];
+ void *priv;
+};
+
+/* This struct defines the operations on the handle.
+ *
+ * init_ae_dev(): (mandatory)
+ * Get the PF configuration from pci_dev and initialize the PF hardware
+ * uninit_ae_dev()
+ * Disable PF device and release PF resource
+ * register_client
+ * Register client to ae_dev
+ * unregister_client()
+ * Unregister client from ae_dev
+ * start()
+ * Enable the hardware
+ * stop()
+ * Disable the hardware
+ * start_client()
+ * Inform the hclge that client has been started
+ * stop_client()
+ * Inform the hclge that client has been stopped
+ * get_status()
+ * Get the carrier state of the back channel of the handle, 1 for ok, 0 for
+ * non-ok
+ * get_ksettings_an_result()
+ * Get negotiation status, speed and duplex
+ * get_media_type()
+ * Get media type of MAC
+ * check_port_speed()
+ * Check whether the target speed is supported
+ * adjust_link()
+ * Adjust link status
+ * set_loopback()
+ * Set loopback
+ * set_promisc_mode
+ * Set promisc mode
+ * request_update_promisc_mode
+ * request to hclge(vf) to update promisc mode
+ * set_mtu()
+ * set mtu
+ * get_pauseparam()
+ * get tx and rx of pause frame use
+ * set_pauseparam()
+ * set tx and rx of pause frame use
+ * set_autoneg()
+ * set autonegotiation of pause frame use
+ * get_autoneg()
+ * get autonegotiation of pause frame use
+ * restart_autoneg()
+ * restart autonegotiation
+ * halt_autoneg()
+ * halt/resume autonegotiation when autonegotiation is on
+ * get_coalesce_usecs()
+ * get usecs to delay a TX interrupt after a packet is sent
+ * get_rx_max_coalesced_frames()
+ * get the maximum number of packets to be coalesced before an interrupt.
+ * set_coalesce_usecs()
+ * set usecs to delay a TX interrupt after a packet is sent
+ * set_coalesce_frames()
+ * set Maximum number of packets to be sent before a TX interrupt.
+ * get_mac_addr()
+ * get mac address
+ * set_mac_addr()
+ * set mac address
+ * add_uc_addr
+ * Add unicast addr to mac table
+ * rm_uc_addr
+ * Remove unicast addr from mac table
+ * set_mc_addr()
+ * Set multicast address
+ * add_mc_addr
+ * Add multicast address to mac table
+ * rm_mc_addr
+ * Remove multicast address from mac table
+ * update_stats()
+ * Update the network device statistics
+ * get_mac_stats()
+ * get mac pause statistics including tx_cnt and rx_cnt
+ * get_ethtool_stats()
+ * Get ethtool network device statistics
+ * get_strings()
+ * Get a set of strings that describe the requested objects
+ * get_sset_count()
+ * Get number of strings that @get_strings will write
+ * update_led_status()
+ * Update the led status
+ * set_led_id()
+ * Set led id
+ * get_regs()
+ * Get regs dump
+ * get_regs_len()
+ * Get the len of the regs dump
+ * get_rss_key_size()
+ * Get rss key size
+ * get_rss_indir_size()
+ * Get rss indirection table size
+ * get_rss()
+ * Get rss table
+ * set_rss()
+ * Set rss table
+ * get_tc_size()
+ * Get tc size of handle
+ * get_vector()
+ * Get vector number and vector information
+ * put_vector()
+ * Put the vector in hdev
+ * map_ring_to_vector()
+ * Map rings to vector
+ * unmap_ring_from_vector()
+ * Unmap rings from vector
+ * reset_queue()
+ * Reset queue
+ * get_fw_version()
+ * Get firmware version
+ * get_mdix_mode()
+ * Get the MDI-X mode of the PHY
+ * enable_vlan_filter()
+ * Enable vlan filter
+ * set_vlan_filter()
+ * Set vlan filter config of Ports
+ * set_vf_vlan_filter()
+ * Set vlan filter config of vf
+ * enable_hw_strip_rxvtag()
+ * Enable/disable hardware stripping of the vlan tag on received packets
+ * set_gro_en
+ * Enable/disable HW GRO
+ * add_arfs_entry
+ * Check the 5-tuple of the flow and create a flow director rule
+ * get_vf_config
+ * Get the VF configuration setting by the host
+ * set_vf_link_state
+ * Set VF link status
+ * set_vf_spoofchk
+ * Enable/disable spoof check for specified vf
+ * set_vf_trust
+ * Enable/disable trust for the specified vf; a trusted vf is allowed to
+ * enable promisc mode
+ * set_vf_rate
+ * Set the max tx rate of specified vf.
+ * set_vf_mac
+ * Configure the default MAC for specified VF
+ * get_module_eeprom
+ * Get the optical module eeprom info.
+ */
+struct hnae3_ae_ops {
+ int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
+ void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_prepare)(struct hnae3_ae_dev *ae_dev);
+ void (*flr_done)(struct hnae3_ae_dev *ae_dev);
+ int (*init_client_instance)(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev);
+ void (*uninit_client_instance)(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev);
+ int (*start)(struct hnae3_handle *handle);
+ void (*stop)(struct hnae3_handle *handle);
+ int (*client_start)(struct hnae3_handle *handle);
+ void (*client_stop)(struct hnae3_handle *handle);
+ int (*get_status)(struct hnae3_handle *handle);
+ void (*get_ksettings_an_result)(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed, u8 *duplex);
+
+ int (*cfg_mac_speed_dup_h)(struct hnae3_handle *handle, int speed,
+ u8 duplex);
+
+ void (*get_media_type)(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type);
+ int (*check_port_speed)(struct hnae3_handle *handle, u32 speed);
+ void (*get_fec)(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode);
+ int (*set_fec)(struct hnae3_handle *handle, u32 fec_mode);
+ void (*adjust_link)(struct hnae3_handle *handle, int speed, int duplex);
+ int (*set_loopback)(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en);
+
+ int (*set_promisc_mode)(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc);
+ void (*request_update_promisc_mode)(struct hnae3_handle *handle);
+ int (*set_mtu)(struct hnae3_handle *handle, int new_mtu);
+
+ void (*get_pauseparam)(struct hnae3_handle *handle,
+ u32 *auto_neg, u32 *rx_en, u32 *tx_en);
+ int (*set_pauseparam)(struct hnae3_handle *handle,
+ u32 auto_neg, u32 rx_en, u32 tx_en);
+
+ int (*set_autoneg)(struct hnae3_handle *handle, bool enable);
+ int (*get_autoneg)(struct hnae3_handle *handle);
+ int (*restart_autoneg)(struct hnae3_handle *handle);
+ int (*halt_autoneg)(struct hnae3_handle *handle, bool halt);
+
+ void (*get_coalesce_usecs)(struct hnae3_handle *handle,
+ u32 *tx_usecs, u32 *rx_usecs);
+ void (*get_rx_max_coalesced_frames)(struct hnae3_handle *handle,
+ u32 *tx_frames, u32 *rx_frames);
+ int (*set_coalesce_usecs)(struct hnae3_handle *handle, u32 timeout);
+ int (*set_coalesce_frames)(struct hnae3_handle *handle,
+ u32 coalesce_frames);
+ void (*get_coalesce_range)(struct hnae3_handle *handle,
+ u32 *tx_frames_low, u32 *rx_frames_low,
+ u32 *tx_frames_high, u32 *rx_frames_high,
+ u32 *tx_usecs_low, u32 *rx_usecs_low,
+ u32 *tx_usecs_high, u32 *rx_usecs_high);
+
+ void (*get_mac_addr)(struct hnae3_handle *handle, u8 *p);
+ int (*set_mac_addr)(struct hnae3_handle *handle, void *p,
+ bool is_first);
+ int (*do_ioctl)(struct hnae3_handle *handle,
+ struct ifreq *ifr, int cmd);
+ int (*add_uc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ int (*rm_uc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ int (*set_mc_addr)(struct hnae3_handle *handle, void *addr);
+ int (*add_mc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ int (*rm_mc_addr)(struct hnae3_handle *handle,
+ const unsigned char *addr);
+ void (*set_tso_stats)(struct hnae3_handle *handle, int enable);
+ void (*update_stats)(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats);
+ void (*get_stats)(struct hnae3_handle *handle, u64 *data);
+ void (*get_mac_stats)(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats);
+ void (*get_strings)(struct hnae3_handle *handle,
+ u32 stringset, u8 *data);
+ int (*get_sset_count)(struct hnae3_handle *handle, int stringset);
+
+ void (*get_regs)(struct hnae3_handle *handle, u32 *version,
+ void *data);
+ int (*get_regs_len)(struct hnae3_handle *handle);
+
+ u32 (*get_rss_key_size)(struct hnae3_handle *handle);
+ u32 (*get_rss_indir_size)(struct hnae3_handle *handle);
+ int (*get_rss)(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc);
+ int (*set_rss)(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc);
+ int (*set_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_rss_tuple)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+
+ int (*get_tc_size)(struct hnae3_handle *handle);
+
+ int (*get_vector)(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info);
+ int (*put_vector)(struct hnae3_handle *handle, int vector_num);
+ int (*map_ring_to_vector)(struct hnae3_handle *handle,
+ int vector_num,
+ struct hnae3_ring_chain_node *vr_chain);
+ int (*unmap_ring_from_vector)(struct hnae3_handle *handle,
+ int vector_num,
+ struct hnae3_ring_chain_node *vr_chain);
+
+ int (*reset_queue)(struct hnae3_handle *handle, u16 queue_id);
+ u32 (*get_fw_version)(struct hnae3_handle *handle);
+ void (*get_mdix_mode)(struct hnae3_handle *handle,
+ u8 *tp_mdix_ctrl, u8 *tp_mdix);
+
+ void (*enable_vlan_filter)(struct hnae3_handle *handle, bool enable);
+ int (*set_vlan_filter)(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+ int (*set_vf_vlan_filter)(struct hnae3_handle *handle, int vfid,
+ u16 vlan, u8 qos, __be16 proto);
+ int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable);
+ void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle);
+ enum hnae3_reset_type (*get_reset_level)(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
+ void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type);
+ void (*get_channels)(struct hnae3_handle *handle,
+ struct ethtool_channels *ch);
+ void (*get_tqps_and_rss_info)(struct hnae3_handle *h,
+ u16 *alloc_tqps, u16 *max_rss_size);
+ int (*set_channels)(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured);
+ void (*get_flowctrl_adv)(struct hnae3_handle *handle,
+ u32 *flowctrl_adv);
+ int (*set_led_id)(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status);
+ void (*get_link_mode)(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising);
+ int (*add_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*del_fd_entry)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ void (*del_all_fd_entries)(struct hnae3_handle *handle,
+ bool clear_list);
+ int (*get_fd_rule_cnt)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_rule_info)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd);
+ int (*get_fd_all_rules)(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs);
+ void (*enable_fd)(struct hnae3_handle *handle, bool enable);
+ int (*add_arfs_entry)(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys);
+ int (*dbg_run_cmd)(struct hnae3_handle *handle, const char *cmd_buf);
+ pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev);
+ bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
+ bool (*ae_dev_resetting)(struct hnae3_handle *handle);
+ unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
+ int (*set_gro_en)(struct hnae3_handle *handle, bool enable);
+ u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id);
+ void (*set_timer_task)(struct hnae3_handle *handle, bool enable);
+ int (*mac_connect_phy)(struct hnae3_handle *handle);
+ void (*mac_disconnect_phy)(struct hnae3_handle *handle);
+ int (*get_vf_config)(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf);
+ int (*set_vf_link_state)(struct hnae3_handle *handle, int vf,
+ int link_state);
+ int (*set_vf_spoofchk)(struct hnae3_handle *handle, int vf,
+ bool enable);
+ int (*set_vf_trust)(struct hnae3_handle *handle, int vf, bool enable);
+ int (*set_vf_rate)(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force);
+ int (*set_vf_mac)(struct hnae3_handle *handle, int vf, u8 *p);
+ int (*get_module_eeprom)(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data);
+ bool (*get_cmdq_stat)(struct hnae3_handle *handle);
+};
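+
+/* Illustrative note (not part of the original patch): callers reach the ops
+ * above through a handle and, as elsewhere in this driver, check the
+ * function pointer before calling it, since not every backend implements
+ * every operation. A minimal sketch under those assumptions:
+ *
+ *	static int example_set_mtu(struct hnae3_handle *h, int new_mtu)
+ *	{
+ *		if (!h->ae_algo->ops->set_mtu)
+ *			return -EOPNOTSUPP;
+ *
+ *		return h->ae_algo->ops->set_mtu(h, new_mtu);
+ *	}
+ *
+ * "example_set_mtu" is a hypothetical helper used only for illustration.
+ */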
+
+struct hnae3_dcb_ops {
+ /* IEEE 802.1Qaz std */
+ int (*ieee_getets)(struct hnae3_handle *, struct ieee_ets *);
+ int (*ieee_setets)(struct hnae3_handle *, struct ieee_ets *);
+ int (*ieee_getpfc)(struct hnae3_handle *, struct ieee_pfc *);
+ int (*ieee_setpfc)(struct hnae3_handle *, struct ieee_pfc *);
+
+ /* DCBX configuration */
+ u8 (*getdcbx)(struct hnae3_handle *);
+ u8 (*setdcbx)(struct hnae3_handle *, u8);
+
+ int (*setup_tc)(struct hnae3_handle *, u8, u8 *);
+};
+
+struct hnae3_ae_algo {
+ const struct hnae3_ae_ops *ops;
+ struct list_head node;
+ const struct pci_device_id *pdev_id_table;
+};
+
+#define HNAE3_INT_NAME_LEN 32
+#define HNAE3_ITR_COUNTDOWN_START 100
+
+struct hnae3_tc_info {
+ u16 tqp_offset; /* TQP offset from base TQP */
+ u16 tqp_count; /* Total TQPs */
+ u8 tc; /* TC index */
+ bool enable; /* whether this TC is enabled or not */
+};
+
+#define HNAE3_MAX_TC 8
+#define HNAE3_MAX_USER_PRIO 8
+struct hnae3_knic_private_info {
+ struct net_device *netdev; /* Set by KNIC client when init instance */
+ u16 rss_size; /* Allocated RSS queues */
+ u16 req_rss_size;
+ u16 rx_buf_len;
+ u16 num_tx_desc;
+ u16 num_rx_desc;
+
+ u8 num_tc; /* Total number of enabled TCs */
+ u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */
+ struct hnae3_tc_info tc_info[HNAE3_MAX_TC]; /* Idx of array is HW TC */
+
+ u16 num_tqps; /* total number of TQPs in this handle */
+ struct hnae3_queue **tqp; /* array base of all TQPs in this instance */
+ const struct hnae3_dcb_ops *dcb_ops;
+
+ u16 int_rl_setting;
+ enum pkt_hash_types rss_type;
+};
+
+struct hnae3_roce_private_info {
+ struct net_device *netdev;
+ void __iomem *roce_io_base;
+ int base_vector;
+ int num_vectors;
+
+ /* The below attributes defined for RoCE client, hnae3 gives
+ * initial values to them, and RoCE client can modify and use
+ * them.
+ */
+ unsigned long reset_state;
+ unsigned long instance_state;
+ unsigned long state;
+};
+
+#define HNAE3_SUPPORT_APP_LOOPBACK BIT(0)
+#define HNAE3_SUPPORT_PHY_LOOPBACK BIT(1)
+#define HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK BIT(2)
+#define HNAE3_SUPPORT_VF BIT(3)
+#define HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK BIT(4)
+
+#define HNAE3_USER_UPE BIT(0) /* unicast promisc enabled by user */
+#define HNAE3_USER_MPE BIT(1) /* multicast promisc enabled by user */
+#define HNAE3_BPE BIT(2) /* broadcast promisc enable */
+#define HNAE3_OVERFLOW_UPE BIT(3) /* unicast mac vlan overflow */
+#define HNAE3_OVERFLOW_MPE BIT(4) /* multicast mac vlan overflow */
+#define HNAE3_VLAN_FLTR BIT(5) /* enable vlan filter */
+#define HNAE3_UPE (HNAE3_USER_UPE | HNAE3_OVERFLOW_UPE)
+#define HNAE3_MPE (HNAE3_USER_MPE | HNAE3_OVERFLOW_MPE)
+
+struct hnae3_handle {
+ struct hnae3_client *client;
+ struct pci_dev *pdev;
+ void *priv;
+ struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */
+ u64 flags; /* Indicate the capabilities for this handle */
+
+ union {
+ struct net_device *netdev; /* first member */
+ struct hnae3_knic_private_info kinfo;
+ struct hnae3_roce_private_info rinfo;
+ };
+
+ u32 numa_node_mask; /* for multi-chip support */
+
+ enum hnae3_port_base_vlan_state port_base_vlan_state;
+
+ u8 netdev_flags;
+ struct dentry *hnae3_dbgfs;
+
+ /* Network interface message level enabled bits */
+ u32 msg_enable;
+};
+
+#define hnae3_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~(mask)); \
+ (origin) |= ((val) << (shift)) & (mask); \
+ } while (0)
+#define hnae3_get_field(origin, mask, shift) (((origin) & (mask)) >> (shift))
+
+#define hnae3_set_bit(origin, shift, val) \
+ hnae3_set_field((origin), (0x1 << (shift)), (shift), (val))
+#define hnae3_get_bit(origin, shift) \
+ hnae3_get_field((origin), (0x1 << (shift)), (shift))
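+
+/* Illustrative note (not part of the original patch): a short worked example
+ * of the field helpers above, using the 2-bit HNAE3_RING_GL_IDX field defined
+ * earlier in this header and an assumed local u32 "reg" that starts at 0:
+ *
+ *	hnae3_set_field(reg, HNAE3_RING_GL_IDX_M, HNAE3_RING_GL_IDX_S,
+ *			HNAE3_RING_GL_TX);
+ *
+ * clears bits 1:0 and writes the value 1, so reg becomes 0x1, and
+ *
+ *	hnae3_get_field(reg, HNAE3_RING_GL_IDX_M, HNAE3_RING_GL_IDX_S)
+ *
+ * reads that value back.
+ */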
+
+int hnae3_register_ae_dev(struct hnae3_ae_dev *ae_dev);
+void hnae3_unregister_ae_dev(struct hnae3_ae_dev *ae_dev);
+
+void hnae3_unregister_ae_algo_prepare(struct hnae3_ae_algo *ae_algo);
+void hnae3_unregister_ae_algo(struct hnae3_ae_algo *ae_algo);
+void hnae3_register_ae_algo(struct hnae3_ae_algo *ae_algo);
+
+void hnae3_unregister_client(struct hnae3_client *client);
+int hnae3_register_client(struct hnae3_client *client);
+
+void hnae3_set_client_init_flag(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev,
+ unsigned int inited);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
new file mode 100644
index 000000000..d2ec4c573
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+static int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_getets)
+ return h->kinfo.dcb_ops->ieee_getets(h, ets);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setets)
+ return h->kinfo.dcb_ops->ieee_setets(h, ets);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_getpfc)
+ return h->kinfo.dcb_ops->ieee_getpfc(h, pfc);
+
+ return -EOPNOTSUPP;
+}
+
+static int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (h->kinfo.dcb_ops->ieee_setpfc)
+ return h->kinfo.dcb_ops->ieee_setpfc(h, pfc);
+
+ return -EOPNOTSUPP;
+}
+
+/* DCBX configuration */
+static u8 hns3_dcbnl_getdcbx(struct net_device *ndev)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->getdcbx)
+ return h->kinfo.dcb_ops->getdcbx(h);
+
+ return 0;
+}
+
+/* return 0 if successful, otherwise fail */
+static u8 hns3_dcbnl_setdcbx(struct net_device *ndev, u8 mode)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (h->kinfo.dcb_ops->setdcbx)
+ return h->kinfo.dcb_ops->setdcbx(h, mode);
+
+ return 1;
+}
+
+static const struct dcbnl_rtnl_ops hns3_dcbnl_ops = {
+ .ieee_getets = hns3_dcbnl_ieee_getets,
+ .ieee_setets = hns3_dcbnl_ieee_setets,
+ .ieee_getpfc = hns3_dcbnl_ieee_getpfc,
+ .ieee_setpfc = hns3_dcbnl_ieee_setpfc,
+ .getdcbx = hns3_dcbnl_getdcbx,
+ .setdcbx = hns3_dcbnl_setdcbx,
+};
+
+/* hns3_dcbnl_setup - DCBNL setup
+ * @handle: the corresponding vport handle
+ * Set up DCBNL
+ */
+void hns3_dcbnl_setup(struct hnae3_handle *handle)
+{
+ struct net_device *dev = handle->kinfo.netdev;
+
+ if ((!handle->kinfo.dcb_ops) || (handle->flags & HNAE3_SUPPORT_VF))
+ return;
+
+ dev->dcbnl_ops = &hns3_dcbnl_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
new file mode 100644
index 000000000..dc9a85745
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+
+#define HNS3_DBG_READ_LEN 256
+#define HNS3_DBG_WRITE_LEN 1024
+
+static struct dentry *hns3_dbgfs_root;
+
+static int hns3_dbg_queue_info(struct hnae3_handle *h,
+ const char *cmd_buf)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_enet_ring *ring;
+ u32 base_add_l, base_add_h;
+ u32 queue_num, queue_max;
+ u32 value, i;
+ int cnt;
+
+ if (!priv->ring) {
+ dev_err(&h->pdev->dev, "priv->ring is NULL\n");
+ return -EFAULT;
+ }
+
+ queue_max = h->kinfo.num_tqps;
+ cnt = kstrtouint(&cmd_buf[11], 0, &queue_num);
+ if (cnt)
+ queue_num = 0;
+ else
+ queue_max = queue_num + 1;
+
+ dev_info(&h->pdev->dev, "queue info\n");
+
+ if (queue_num >= h->kinfo.num_tqps) {
+ dev_err(&h->pdev->dev,
+ "Queue number(%u) is out of range(0-%u)\n", queue_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ for (i = queue_num; i < queue_max; i++) {
+ /* On each iteration, check whether the instance is being reset,
+ * to prevent referencing invalid memory. The following code also
+ * needs to finish within 100ms.
+ */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return -EPERM;
+
+ ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "RX(%u) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_BD_LEN_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING BD LEN: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING PKTNUM: %u\n", i, value);
+
+ ring = &priv->ring[i];
+ base_add_h = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_H_REG);
+ base_add_l = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BASEADDR_L_REG);
+ dev_info(&h->pdev->dev, "TX(%u) BASE ADD: 0x%08x%08x\n", i,
+ base_add_h, base_add_l);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING BD NUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TC_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING TC: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING TAIL: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING HEAD: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING FBDNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING OFFSET: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_RING_PKTNUM_RECORD_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING PKTNUM: %u\n", i, value);
+
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_EN_REG);
+ dev_info(&h->pdev->dev, "TX/RX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+
+ if (hnae3_ae_dev_tqp_txrx_indep_supported(ae_dev)) {
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_TX_EN_REG);
+ dev_info(&h->pdev->dev, "TX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+
+ value = readl_relaxed(ring->tqp->io_base +
+ HNS3_RING_RX_EN_REG);
+ dev_info(&h->pdev->dev, "RX(%u) RING EN: %s\n", i,
+ value ? "enable" : "disable");
+ }
+
+ dev_info(&h->pdev->dev, "\n");
+ }
+
+ return 0;
+}
+
+static int hns3_dbg_queue_map(struct hnae3_handle *h)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ int i;
+
+ if (!h->ae_algo->ops->get_global_queue_id)
+ return -EOPNOTSUPP;
+
+ dev_info(&h->pdev->dev, "map info for queue id and vector id\n");
+ dev_info(&h->pdev->dev,
+ "local queue id | global queue id | vector id\n");
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ u16 global_qid;
+
+ global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
+ if (!priv->ring || !priv->ring[i].tqp_vector)
+ continue;
+
+ dev_info(&h->pdev->dev,
+ " %4d %4d %4d\n",
+ i, global_qid, priv->ring[i].tqp_vector->vector_irq);
+ }
+
+ return 0;
+}
+
+static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
+{
+ struct hns3_nic_priv *priv = h->priv;
+ struct hns3_desc *rx_desc, *tx_desc;
+ struct device *dev = &h->pdev->dev;
+ struct hns3_enet_ring *ring;
+ u32 tx_index, rx_index;
+ u32 q_num, value;
+ dma_addr_t addr;
+ int cnt;
+
+ cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index);
+ if (cnt == 2) {
+ rx_index = tx_index;
+ } else if (cnt != 1) {
+ dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt);
+ return -EINVAL;
+ }
+
+ if (q_num >= h->kinfo.num_tqps) {
+ dev_err(dev, "Queue number(%u) is out of range(0-%u)\n", q_num,
+ h->kinfo.num_tqps - 1);
+ return -EINVAL;
+ }
+
+ ring = &priv->ring[q_num];
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ tx_index = (cnt == 1) ? value : tx_index;
+
+ if (tx_index >= ring->desc_num) {
+ dev_err(dev, "bd index(%u) is out of range(0-%u)\n", tx_index,
+ ring->desc_num - 1);
+ return -EINVAL;
+ }
+
+ tx_desc = &ring->desc[tx_index];
+ addr = le64_to_cpu(tx_desc->addr);
+ dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index);
+ dev_info(dev, "(TX)addr: %pad\n", &addr);
+ dev_info(dev, "(TX)vlan_tag: %u\n", le16_to_cpu(tx_desc->tx.vlan_tag));
+ dev_info(dev, "(TX)send_size: %u\n",
+ le16_to_cpu(tx_desc->tx.send_size));
+ dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso);
+ dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len);
+ dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len);
+ dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len);
+ dev_info(dev, "(TX)vlan_tag: %u\n",
+ le16_to_cpu(tx_desc->tx.outer_vlan_tag));
+ dev_info(dev, "(TX)tv: %u\n", le16_to_cpu(tx_desc->tx.tv));
+ dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec);
+ dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len);
+ dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len);
+ dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len);
+ dev_info(dev, "(TX)paylen: %u\n", le32_to_cpu(tx_desc->tx.paylen));
+ dev_info(dev, "(TX)vld_ra_ri: %u\n",
+ le16_to_cpu(tx_desc->tx.bdtp_fe_sc_vld_ra_ri));
+ dev_info(dev, "(TX)mss: %u\n", le16_to_cpu(tx_desc->tx.mss));
+
+ ring = &priv->ring[q_num + h->kinfo.num_tqps];
+ value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
+ rx_index = (cnt == 1) ? value : tx_index;
+ rx_desc = &ring->desc[rx_index];
+
+ addr = le64_to_cpu(rx_desc->addr);
+ dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index);
+ dev_info(dev, "(RX)addr: %pad\n", &addr);
+ dev_info(dev, "(RX)l234_info: %u\n",
+ le32_to_cpu(rx_desc->rx.l234_info));
+ dev_info(dev, "(RX)pkt_len: %u\n", le16_to_cpu(rx_desc->rx.pkt_len));
+ dev_info(dev, "(RX)size: %u\n", le16_to_cpu(rx_desc->rx.size));
+ dev_info(dev, "(RX)rss_hash: %u\n", le32_to_cpu(rx_desc->rx.rss_hash));
+ dev_info(dev, "(RX)fd_id: %u\n", le16_to_cpu(rx_desc->rx.fd_id));
+ dev_info(dev, "(RX)vlan_tag: %u\n", le16_to_cpu(rx_desc->rx.vlan_tag));
+ dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n",
+ le16_to_cpu(rx_desc->rx.o_dm_vlan_id_fb));
+ dev_info(dev, "(RX)ot_vlan_tag: %u\n",
+ le16_to_cpu(rx_desc->rx.ot_vlan_tag));
+ dev_info(dev, "(RX)bd_base_info: %u\n",
+ le32_to_cpu(rx_desc->rx.bd_base_info));
+
+ return 0;
+}
+
+static void hns3_dbg_help(struct hnae3_handle *h)
+{
+#define HNS3_DBG_BUF_LEN 256
+
+ char printf_buf[HNS3_DBG_BUF_LEN];
+
+ dev_info(&h->pdev->dev, "available commands\n");
+ dev_info(&h->pdev->dev, "queue info <number>\n");
+ dev_info(&h->pdev->dev, "queue map\n");
+ dev_info(&h->pdev->dev, "bd info <q_num> <bd index>\n");
+ dev_info(&h->pdev->dev, "dev capability\n");
+ dev_info(&h->pdev->dev, "dev spec\n");
+
+ if (!hns3_is_phys_func(h->pdev))
+ return;
+
+ dev_info(&h->pdev->dev, "dump fd tcam\n");
+ dev_info(&h->pdev->dev, "dump tc\n");
+ dev_info(&h->pdev->dev, "dump tm map <q_num>\n");
+ dev_info(&h->pdev->dev, "dump tm\n");
+ dev_info(&h->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&h->pdev->dev, "dump qos pri map\n");
+ dev_info(&h->pdev->dev, "dump qos buf cfg\n");
+ dev_info(&h->pdev->dev, "dump mng tbl\n");
+ dev_info(&h->pdev->dev, "dump reset info\n");
+ dev_info(&h->pdev->dev, "dump m7 info\n");
+ dev_info(&h->pdev->dev, "dump ncl_config <offset> <length>(in hex)\n");
+ dev_info(&h->pdev->dev, "dump mac tnl status\n");
+ dev_info(&h->pdev->dev, "dump loopback\n");
+ dev_info(&h->pdev->dev, "dump qs shaper [qs id]\n");
+ dev_info(&h->pdev->dev, "dump uc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump mc mac list <func id>\n");
+ dev_info(&h->pdev->dev, "dump intr\n");
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg [[bios common] [ssu <port_id>]",
+ HNS3_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ " [igu egu <port_id>] [rpu <tc_queue_num>]",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ " [rtc] [ppp] [rcb] [tqp <queue_num>] [mac]]\n",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ dev_info(&h->pdev->dev, "%s", printf_buf);
+
+ memset(printf_buf, 0, HNS3_DBG_BUF_LEN);
+ strncat(printf_buf, "dump reg dcb <port_id> <pri_id> <pg_id>",
+ HNS3_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf), " <rq_id> <nq_id> <qset_id>\n",
+ HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1);
+ dev_info(&h->pdev->dev, "%s", printf_buf);
+}
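+
+/* Illustrative note (not part of the original patch): the commands listed by
+ * hns3_dbg_help() are issued by writing them to the per-device "cmd" debugfs
+ * file created in hns3_dbg_init() below, with the results printed to the
+ * kernel log. Assuming the debugfs root is named after the driver and the
+ * device directory after its PCI address, usage looks roughly like:
+ *
+ *	echo "queue info 0" > /sys/kernel/debug/hns3/0000:7d:00.0/cmd
+ *	dmesg | tail
+ *
+ * The exact paths and the PCI address are assumptions for illustration only.
+ */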
+
+static void hns3_dbg_dev_caps(struct hnae3_handle *h)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ unsigned long *caps;
+
+ caps = ae_dev->caps;
+
+ dev_info(&h->pdev->dev, "support FD: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_FD_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support GRO: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_GRO_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support FEC: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_FEC_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support UDP GSO: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support PTP: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_PTP_B, caps) ? "yes" : "no");
+ dev_info(&h->pdev->dev, "support INT QL: %s\n",
+ test_bit(HNAE3_DEV_SUPPORT_INT_QL_B, caps) ? "yes" : "no");
+}
+
+static void hns3_dbg_dev_specs(struct hnae3_handle *h)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ struct hnae3_dev_specs *dev_specs = &ae_dev->dev_specs;
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ struct hns3_nic_priv *priv = h->priv;
+
+ dev_info(priv->dev, "MAC entry num: %u\n", dev_specs->mac_entry_num);
+ dev_info(priv->dev, "MNG entry num: %u\n", dev_specs->mng_entry_num);
+ dev_info(priv->dev, "MAX non tso bd num: %u\n",
+ dev_specs->max_non_tso_bd_num);
+ dev_info(priv->dev, "RSS ind tbl size: %u\n",
+ dev_specs->rss_ind_tbl_size);
+ dev_info(priv->dev, "RSS key size: %u\n", dev_specs->rss_key_size);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "MAX INT QL: %u\n", dev_specs->int_ql_max);
+}
+
+static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int uncopy_bytes;
+ char *buf;
+ int len;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count < HNS3_DBG_READ_LEN)
+ return -ENOSPC;
+
+ buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = scnprintf(buf, HNS3_DBG_READ_LEN, "%s\n",
+ "Please echo help to cmd to get help information");
+ uncopy_bytes = copy_to_user(buffer, buf, len);
+
+ kfree(buf);
+
+ if (uncopy_bytes)
+ return -EFAULT;
+
+ return (*ppos = len);
+}
+
+static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct hnae3_handle *handle = filp->private_data;
+ struct hns3_nic_priv *priv = handle->priv;
+ char *cmd_buf, *cmd_buf_tmp;
+ int uncopied_bytes;
+ int ret = 0;
+
+ if (*ppos != 0)
+ return 0;
+
+ /* Check whether the instance is being reset. */
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) ||
+ test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ if (count > HNS3_DBG_WRITE_LEN)
+ return -ENOSPC;
+
+ cmd_buf = kzalloc(count + 1, GFP_KERNEL);
+ if (!cmd_buf)
+ return count;
+
+ uncopied_bytes = copy_from_user(cmd_buf, buffer, count);
+ if (uncopied_bytes) {
+ kfree(cmd_buf);
+ return -EFAULT;
+ }
+
+ cmd_buf[count] = '\0';
+
+ cmd_buf_tmp = strchr(cmd_buf, '\n');
+ if (cmd_buf_tmp) {
+ *cmd_buf_tmp = '\0';
+ count = cmd_buf_tmp - cmd_buf + 1;
+ }
+
+ if (strncmp(cmd_buf, "help", 4) == 0)
+ hns3_dbg_help(handle);
+ else if (strncmp(cmd_buf, "queue info", 10) == 0)
+ ret = hns3_dbg_queue_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "queue map", 9) == 0)
+ ret = hns3_dbg_queue_map(handle);
+ else if (strncmp(cmd_buf, "bd info", 7) == 0)
+ ret = hns3_dbg_bd_info(handle, cmd_buf);
+ else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+ hns3_dbg_dev_caps(handle);
+ else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+ hns3_dbg_dev_specs(handle);
+ else if (handle->ae_algo->ops->dbg_run_cmd)
+ ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+ else
+ ret = -EOPNOTSUPP;
+
+ if (ret)
+ hns3_dbg_help(handle);
+
+ kfree(cmd_buf);
+ cmd_buf = NULL;
+
+ return count;
+}
+
+static const struct file_operations hns3_dbg_cmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = hns3_dbg_cmd_read,
+ .write = hns3_dbg_cmd_write,
+};
+
+void hns3_dbg_init(struct hnae3_handle *handle)
+{
+ const char *name = pci_name(handle->pdev);
+
+ handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root);
+
+ debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle,
+ &hns3_dbg_cmd_fops);
+}
+
+void hns3_dbg_uninit(struct hnae3_handle *handle)
+{
+ debugfs_remove_recursive(handle->hnae3_dbgfs);
+ handle->hnae3_dbgfs = NULL;
+}
+
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name)
+{
+ hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL);
+}
+
+void hns3_dbg_unregister_debugfs(void)
+{
+ debugfs_remove_recursive(hns3_dbgfs_root);
+ hns3_dbgfs_root = NULL;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
new file mode 100644
index 000000000..a4ab3e7ef
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -0,0 +1,4756 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/interrupt.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#include <linux/if_vlan.h>
+#include <linux/irq.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/skbuff.h>
+#include <linux/sctp.h>
+#include <net/gre.h>
+#include <net/ip6_checksum.h>
+#include <net/pkt_cls.h>
+#include <net/tcp.h>
+#include <net/vxlan.h>
+#include <net/geneve.h>
+
+#include "hnae3.h"
+#include "hns3_enet.h"
+/* All hns3 tracepoints are defined by the include below, which
+ * must be included exactly once across the whole kernel with
+ * CREATE_TRACE_POINTS defined
+ */
+#define CREATE_TRACE_POINTS
+#include "hns3_trace.h"
+
+#define hns3_set_field(origin, shift, val) ((origin) |= ((val) << (shift)))
+#define hns3_tx_bd_count(S) DIV_ROUND_UP(S, HNS3_MAX_BD_SIZE)
+
+#define hns3_rl_err(fmt, ...) \
+ do { \
+ if (net_ratelimit()) \
+ netdev_err(fmt, ##__VA_ARGS__); \
+ } while (0)
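+
+/* Illustrative note (not part of the original patch): despite the parameter
+ * being named "fmt", the first argument of hns3_rl_err() is the net_device,
+ * exactly as for netdev_err(), so a typical (assumed) call site looks like:
+ *
+ *	hns3_rl_err(netdev, "failed to map buffer, ret = %d\n", ret);
+ *
+ * and the message is only printed while net_ratelimit() allows it.
+ */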
+
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force);
+
+static const char hns3_driver_name[] = "hns3";
+static const char hns3_driver_string[] =
+ "Hisilicon Ethernet Network Driver for Hip08 Family";
+static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
+static struct hnae3_client client;
+
+static int debug = -1;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, " Network interface message level setting");
+
+#define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
+
+#define HNS3_INNER_VLAN_TAG 1
+#define HNS3_OUTER_VLAN_TAG 2
+
+#define HNS3_MIN_TX_LEN 33U
+#define HNS3_MIN_TUN_PKT_LEN 65U
+
+/* hns3_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
+ * Class, Class Mask, private data (not used) }
+ */
+static const struct pci_device_id hns3_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ /* required last entry */
+ {0, }
+};
+MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
+
+static irqreturn_t hns3_irq_handle(int irq, void *vector)
+{
+ struct hns3_enet_tqp_vector *tqp_vector = vector;
+
+ napi_schedule_irqoff(&tqp_vector->napi);
+
+ return IRQ_HANDLED;
+}
+
+static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
+{
+ struct hns3_enet_tqp_vector *tqp_vectors;
+ unsigned int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vectors = &priv->tqp_vector[i];
+
+ if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
+ continue;
+
+ /* clear the affinity mask */
+ irq_set_affinity_hint(tqp_vectors->vector_irq, NULL);
+
+ /* release the irq resource */
+ free_irq(tqp_vectors->vector_irq, tqp_vectors);
+ tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
+ }
+}
+
+static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
+{
+ struct hns3_enet_tqp_vector *tqp_vectors;
+ int txrx_int_idx = 0;
+ int rx_int_idx = 0;
+ int tx_int_idx = 0;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vectors = &priv->tqp_vector[i];
+
+ if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
+ continue;
+
+ if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "TxRx", txrx_int_idx++);
+ txrx_int_idx++;
+ } else if (tqp_vectors->rx_group.ring) {
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Rx", rx_int_idx++);
+ } else if (tqp_vectors->tx_group.ring) {
+ snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN,
+ "%s-%s-%s-%d", hns3_driver_name,
+ pci_name(priv->ae_handle->pdev),
+ "Tx", tx_int_idx++);
+ } else {
+ /* Skip this unused q_vector */
+ continue;
+ }
+
+ tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
+
+ irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN);
+ ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
+ tqp_vectors->name, tqp_vectors);
+ if (ret) {
+ netdev_err(priv->netdev, "request irq(%d) fail\n",
+ tqp_vectors->vector_irq);
+ hns3_nic_uninit_irq(priv);
+ return ret;
+ }
+
+ irq_set_affinity_hint(tqp_vectors->vector_irq,
+ &tqp_vectors->affinity_mask);
+
+ tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
+ }
+
+ return 0;
+}
+
+static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 mask_en)
+{
+ writel(mask_en, tqp_vector->mask_addr);
+}
+
+static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ napi_enable(&tqp_vector->napi);
+ enable_irq(tqp_vector->vector_irq);
+
+ /* enable vector */
+ hns3_mask_vector_irq(tqp_vector, 1);
+}
+
+static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ /* disable vector */
+ hns3_mask_vector_irq(tqp_vector, 0);
+
+ disable_irq(tqp_vector->vector_irq);
+ napi_disable(&tqp_vector->napi);
+}
+
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value)
+{
+ u32 rl_reg = hns3_rl_usec_to_reg(rl_value);
+
+ /* This defines the configuration for RL (Interrupt Rate Limiter).
+ * RL limits the rate of interrupts, i.e. the number of interrupts
+ * per second. GL and RL are the two ways to achieve interrupt
+ * coalescing.
+ */
+
+ if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
+ !tqp_vector->rx_group.coal.gl_adapt_enable)
+ /* According to the hardware, the range of rl_reg is
+ * 0-59 and the unit is 4.
+ */
+ rl_reg |= HNS3_INT_RL_ENABLE_MASK;
+
+ writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
+}
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
+}
+
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value)
+{
+ u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);
+
+ writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
+}
+
+static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
+{
+ /* initialize the configuration for interrupt coalescing.
+ * 1. GL (Interrupt Gap Limiter)
+ * 2. RL (Interrupt Rate Limiter)
+ *
+ * Default: enable interrupt coalescing self-adaptive and GL
+ */
+ tqp_vector->tx_group.coal.gl_adapt_enable = 1;
+ tqp_vector->rx_group.coal.gl_adapt_enable = 1;
+
+ tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
+ tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
+
+ tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
+ tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
+}
+
+static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tqp_vector->tx_group.coal.int_gl);
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ tqp_vector->rx_group.coal.int_gl);
+ hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_nic_set_real_num_queue(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
+ int i, ret;
+
+ if (kinfo->num_tc <= 1) {
+ netdev_reset_tc(netdev);
+ } else {
+ ret = netdev_set_num_tc(netdev, kinfo->num_tc);
+ if (ret) {
+ netdev_err(netdev,
+ "netdev_set_num_tc fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (!kinfo->tc_info[i].enable)
+ continue;
+
+ netdev_set_tc_queue(netdev,
+ kinfo->tc_info[i].tc,
+ kinfo->tc_info[i].tqp_count,
+ kinfo->tc_info[i].tqp_offset);
+ }
+ }
+
+ ret = netif_set_real_num_tx_queues(netdev, queue_size);
+ if (ret) {
+ netdev_err(netdev,
+ "netif_set_real_num_tx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ret = netif_set_real_num_rx_queues(netdev, queue_size);
+ if (ret) {
+ netdev_err(netdev,
+ "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
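+
+/* Illustrative note (not part of the original patch): the function above
+ * advertises rss_size * num_tc real queues to the stack. For example, with
+ * an assumed rss_size of 8 and two enabled TCs, 16 TX/RX queues are exposed
+ * and netdev_set_tc_queue() maps each TC to its contiguous block of queues
+ * (tqp_count queues starting at tqp_offset, as provided by the backend).
+ */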
+
+static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
+{
+ u16 alloc_tqps, max_rss_size, rss_size;
+
+ h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size);
+ rss_size = alloc_tqps / h->kinfo.num_tc;
+
+ return min_t(u16, rss_size, max_rss_size);
+}
+
+static void hns3_tqp_enable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg |= BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_tqp_disable(struct hnae3_queue *tqp)
+{
+ u32 rcb_reg;
+
+ rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG);
+ rcb_reg &= ~BIT(HNS3_RING_EN_B);
+ hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg);
+}
+
+static void hns3_free_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(netdev->rx_cpu_rmap);
+ netdev->rx_cpu_rmap = NULL;
+#endif
+}
+
+static int hns3_set_rx_cpu_rmap(struct net_device *netdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i, ret;
+
+ if (!netdev->rx_cpu_rmap) {
+ netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num);
+ if (!netdev->rx_cpu_rmap)
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap,
+ tqp_vector->vector_irq);
+ if (ret) {
+ hns3_free_rx_cpu_rmap(netdev);
+ return ret;
+ }
+ }
+#endif
+ return 0;
+}
+
+static int hns3_nic_net_up(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int i, j;
+ int ret;
+
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
+ clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+ /* enable the vectors */
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_enable(&priv->tqp_vector[i]);
+
+ /* enable rcb */
+ for (j = 0; j < h->kinfo.num_tqps; j++)
+ hns3_tqp_enable(h->kinfo.tqp[j]);
+
+ /* start the ae_dev */
+ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+ while (j--)
+ hns3_tqp_disable(h->kinfo.tqp[j]);
+
+ for (j = i - 1; j >= 0; j--)
+ hns3_vector_disable(&priv->tqp_vector[j]);
+ }
+
+ return ret;
+}
+
+static void hns3_config_xps(struct hns3_nic_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i];
+ struct hns3_enet_ring *ring = tqp_vector->tx_group.ring;
+
+ while (ring) {
+ int ret;
+
+ ret = netif_set_xps_queue(priv->netdev,
+ &tqp_vector->affinity_mask,
+ ring->tqp->tqp_index);
+ if (ret)
+ netdev_warn(priv->netdev,
+ "set xps queue failed: %d", ret);
+
+ ring = ring->next;
+ }
+ }
+}
+
+static int hns3_nic_net_open(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo;
+ int i, ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netdev_warn(netdev, "net open repeatedly!\n");
+ return 0;
+ }
+
+ netif_carrier_off(netdev);
+
+ ret = hns3_nic_set_real_num_queue(netdev);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_net_up(netdev);
+ if (ret) {
+ netdev_err(netdev, "net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ kinfo = &h->kinfo;
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ netdev_set_prio_tc_map(netdev, i, kinfo->prio_tc[i]);
+
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, true);
+
+ hns3_config_xps(priv);
+
+ netif_dbg(h, drv, netdev, "net open\n");
+
+ return 0;
+}
+
+static void hns3_reset_tx_queue(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct netdev_queue *dev_queue;
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ dev_queue = netdev_get_tx_queue(ndev,
+ priv->ring[i].queue_index);
+ netdev_tx_reset_queue(dev_queue);
+ }
+}
+
+static void hns3_nic_net_down(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops;
+ int i;
+
+ /* disable vectors */
+ for (i = 0; i < priv->vector_num; i++)
+ hns3_vector_disable(&priv->tqp_vector[i]);
+
+ /* disable rcb */
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_tqp_disable(h->kinfo.tqp[i]);
+
+ /* stop ae_dev */
+ ops = priv->ae_handle->ae_algo->ops;
+ if (ops->stop)
+ ops->stop(priv->ae_handle);
+
+ /* delay ring buffer clearing to hns3_reset_notify_uninit_enet
+ * during reset process, because driver may not be able
+ * to disable the ring through firmware when downing the netdev.
+ */
+ if (!hns3_nic_resetting(netdev))
+ hns3_clear_all_ring(priv->ae_handle, false);
+
+ hns3_reset_tx_queue(priv->ae_handle);
+}
+
+static int hns3_nic_net_stop(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return 0;
+
+ netif_dbg(h, drv, netdev, "net stop\n");
+
+ if (h->ae_algo->ops->set_timer_task)
+ h->ae_algo->ops->set_timer_task(priv->ae_handle, false);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ hns3_nic_net_down(netdev);
+
+ return 0;
+}
+
+static int hns3_nic_uc_sync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->add_uc_addr)
+ return h->ae_algo->ops->add_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns3_nic_uc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ /* Ignore requests to remove the device address, because the
+ * device address and the other addresses of the uc list are
+ * stored together in the function's mac filter list.
+ */
+ if (ether_addr_equal(addr, netdev->dev_addr))
+ return 0;
+
+ if (h->ae_algo->ops->rm_uc_addr)
+ return h->ae_algo->ops->rm_uc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns3_nic_mc_sync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->add_mc_addr)
+ return h->ae_algo->ops->add_mc_addr(h, addr);
+
+ return 0;
+}
+
+static int hns3_nic_mc_unsync(struct net_device *netdev,
+ const unsigned char *addr)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->rm_mc_addr)
+ return h->ae_algo->ops->rm_mc_addr(h, addr);
+
+ return 0;
+}
+
+static u8 hns3_get_netdev_flags(struct net_device *netdev)
+{
+ u8 flags = 0;
+
+ if (netdev->flags & IFF_PROMISC) {
+ flags = HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE;
+ } else {
+ flags |= HNAE3_VLAN_FLTR;
+ if (netdev->flags & IFF_ALLMULTI)
+ flags |= HNAE3_USER_MPE;
+ }
+
+ return flags;
+}
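+
+/* Illustrative note (not part of the original patch): the mapping produced
+ * above, in table form:
+ *
+ *	IFF_PROMISC set               -> HNAE3_USER_UPE | HNAE3_USER_MPE | HNAE3_BPE
+ *	IFF_ALLMULTI set (no promisc) -> HNAE3_VLAN_FLTR | HNAE3_USER_MPE
+ *	neither                       -> HNAE3_VLAN_FLTR
+ */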
+
+static void hns3_nic_set_rx_mode(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u8 new_flags;
+
+ new_flags = hns3_get_netdev_flags(netdev);
+
+ __dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync);
+ __dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync);
+
+ /* When user-mode promiscuous mode is enabled, vlan filtering is
+ * disabled to let all packets in.
+ */
+ h->netdev_flags = new_flags;
+ hns3_request_update_promisc_mode(h);
+}
+
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ops->request_update_promisc_mode)
+ ops->request_update_promisc_mode(handle);
+}
+
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ bool last_state;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
+ h->ae_algo->ops->enable_vlan_filter) {
+ last_state = h->netdev_flags & HNAE3_VLAN_FLTR ? true : false;
+ if (enable != last_state) {
+ netdev_info(netdev,
+ "%s vlan filter\n",
+ enable ? "enable" : "disable");
+ h->ae_algo->ops->enable_vlan_filter(h, enable);
+ }
+ }
+}
+
+static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
+ u16 *mss, u32 *type_cs_vlan_tso)
+{
+ u32 l4_offset, hdr_len;
+ union l3_hdr_info l3;
+ union l4_hdr_info l4;
+ u32 l4_paylen;
+ int ret;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (unlikely(ret < 0))
+ return ret;
+
+ l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* Software should clear the IPv4's checksum field when tso is
+ * needed.
+ */
+ if (l3.v4->version == 4)
+ l3.v4->check = 0;
+
+ /* tunnel packet */
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
+ SKB_GSO_GRE_CSUM |
+ SKB_GSO_UDP_TUNNEL |
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ if ((!(skb_shinfo(skb)->gso_type &
+ SKB_GSO_PARTIAL)) &&
+ (skb_shinfo(skb)->gso_type &
+ SKB_GSO_UDP_TUNNEL_CSUM)) {
+ /* Software should clear the udp's checksum
+ * field when tso is needed.
+ */
+ l4.udp->check = 0;
+ }
+ /* reset l3&l4 pointers from outer to inner headers */
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+
+ /* Software should clear the IPv4's checksum field when
+ * tso is needed.
+ */
+ if (l3.v4->version == 4)
+ l3.v4->check = 0;
+ }
+
+ /* normal or tunnel packet */
+ l4_offset = l4.hdr - skb->data;
+
+ /* remove payload length from inner pseudo checksum when tso */
+ l4_paylen = skb->len - l4_offset;
+
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ hdr_len = sizeof(*l4.udp) + l4_offset;
+ csum_replace_by_diff(&l4.udp->check,
+ (__force __wsum)htonl(l4_paylen));
+ } else {
+ hdr_len = (l4.tcp->doff << 2) + l4_offset;
+ csum_replace_by_diff(&l4.tcp->check,
+ (__force __wsum)htonl(l4_paylen));
+ }
+
+ /* find the txbd field values */
+ *paylen = skb->len - hdr_len;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_TSO_B, 1);
+
+ /* get MSS for TSO */
+ *mss = skb_shinfo(skb)->gso_size;
+
+ trace_hns3_tso(skb);
+
+ return 0;
+}
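+
+ /* A minimal userspace-style sketch (not part of this driver) of the TSO
+ * header/payload arithmetic above, assuming a non-tunnel TCP GSO frame
+ * with a 14-byte Ethernet header, a 20-byte IPv4 header and doff = 5.
+ */
+ #include <stdio.h>
+
+ static void sketch_tso_lengths(unsigned int skb_len, unsigned int l4_offset,
+ unsigned int doff)
+ {
+ unsigned int hdr_len = (doff << 2) + l4_offset; /* TCP + L2/L3 headers */
+ unsigned int paylen = skb_len - hdr_len; /* value written to desc->tx.paylen */
+
+ printf("hdr_len=%u paylen=%u\n", hdr_len, paylen);
+ }
+
+ int main(void)
+ {
+ sketch_tso_lengths(65536, 14 + 20, 5); /* prints hdr_len=54 paylen=65482 */
+ return 0;
+ }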
+
+static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
+ u8 *il4_proto)
+{
+ union l3_hdr_info l3;
+ unsigned char *l4_hdr;
+ unsigned char *exthdr;
+ u8 l4_proto_tmp;
+ __be16 frag_off;
+
+ /* find the outer header pointers */
+ l3.hdr = skb_network_header(skb);
+ l4_hdr = skb_transport_header(skb);
+
+ if (skb->protocol == htons(ETH_P_IPV6)) {
+ exthdr = l3.hdr + sizeof(*l3.v6);
+ l4_proto_tmp = l3.v6->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto_tmp, &frag_off);
+ } else if (skb->protocol == htons(ETH_P_IP)) {
+ l4_proto_tmp = l3.v4->protocol;
+ } else {
+ return -EINVAL;
+ }
+
+ *ol4_proto = l4_proto_tmp;
+
+ /* non-tunnel packets have no inner headers */
+ if (!skb->encapsulation) {
+ *il4_proto = 0;
+ return 0;
+ }
+
+ /* find the inner header pointers */
+ l3.hdr = skb_inner_network_header(skb);
+ l4_hdr = skb_inner_transport_header(skb);
+
+ if (l3.v6->version == 6) {
+ exthdr = l3.hdr + sizeof(*l3.v6);
+ l4_proto_tmp = l3.v6->nexthdr;
+ if (l4_hdr != exthdr)
+ ipv6_skip_exthdr(skb, exthdr - skb->data,
+ &l4_proto_tmp, &frag_off);
+ } else if (l3.v4->version == 4) {
+ l4_proto_tmp = l3.v4->protocol;
+ }
+
+ *il4_proto = l4_proto_tmp;
+
+ return 0;
+}
+
+ /* When skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL and
+ * the packet is UDP with an IANA-assigned tunnel destination port, the
+ * hardware is expected to do the checksum offload; however, it will not
+ * do so when the UDP destination port is 4789, 4790 or 6081.
+ */
+static bool hns3_tunnel_csum_bug(struct sk_buff *skb)
+{
+ union l4_hdr_info l4;
+
+ l4.hdr = skb_transport_header(skb);
+
+ if (!(!skb->encapsulation &&
+ (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) ||
+ l4.udp->dest == htons(GENEVE_UDP_PORT) ||
+ l4.udp->dest == htons(4790))))
+ return false;
+
+ return true;
+}
+
+static void hns3_set_outer_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u32 *ol_type_vlan_len_msec)
+{
+ u32 l2_len, l3_len, l4_len;
+ unsigned char *il2_hdr;
+ union l3_hdr_info l3;
+ union l4_hdr_info l4;
+
+ l3.hdr = skb_network_header(skb);
+ l4.hdr = skb_transport_header(skb);
+
+ /* compute OL2 header size, in units of 2 bytes */
+ l2_len = l3.hdr - skb->data;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute OL3 header size, in units of 4 bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ il2_hdr = skb_inner_mac_header(skb);
+ /* compute OL4 header size, in units of 4 bytes */
+ l4_len = il2_hdr - l4.hdr;
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_S, l4_len >> 2);
+
+ /* define outer network header type */
+ if (skb->protocol == htons(ETH_P_IP)) {
+ if (skb_is_gso(skb))
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_CSUM);
+ else
+ hns3_set_field(*ol_type_vlan_len_msec,
+ HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV4_NO_CSUM);
+
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_S,
+ HNS3_OL3T_IPV6);
+ }
+
+ if (ol4_proto == IPPROTO_UDP)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_MAC_IN_UDP);
+ else if (ol4_proto == IPPROTO_GRE)
+ hns3_set_field(*ol_type_vlan_len_msec, HNS3_TXD_TUNTYPE_S,
+ HNS3_TUN_NVGRE);
+}
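+
+ /* Illustrative sketch (not part of this driver): the outer length fields
+ * above are encoded in hardware units, L2 in 2-byte units and L3/L4 in
+ * 4-byte units, so a 14-byte Ethernet header becomes 7 and a 20-byte
+ * IPv4 header becomes 5. The sketch_* names below are hypothetical.
+ */
+ #include <assert.h>
+
+ static unsigned int sketch_len_in_2byte_units(unsigned int len) { return len >> 1; }
+ static unsigned int sketch_len_in_4byte_units(unsigned int len) { return len >> 2; }
+
+ int main(void)
+ {
+ assert(sketch_len_in_2byte_units(14) == 7); /* outer L2 */
+ assert(sketch_len_in_4byte_units(20) == 5); /* outer L3 */
+ assert(sketch_len_in_4byte_units(8) == 2); /* outer UDP header as L4 */
+ return 0;
+ }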
+
+static int hns3_set_l2l3l4(struct sk_buff *skb, u8 ol4_proto,
+ u8 il4_proto, u32 *type_cs_vlan_tso,
+ u32 *ol_type_vlan_len_msec)
+{
+ unsigned char *l2_hdr = skb->data;
+ u32 l4_proto = ol4_proto;
+ union l4_hdr_info l4;
+ union l3_hdr_info l3;
+ u32 l2_len, l3_len;
+
+ l4.hdr = skb_transport_header(skb);
+ l3.hdr = skb_network_header(skb);
+
+ /* handle encapsulation skb */
+ if (skb->encapsulation) {
+ /* If this is not a UDP/GRE encapsulated skb */
+ if (!(ol4_proto == IPPROTO_UDP || ol4_proto == IPPROTO_GRE)) {
+ /* drop the tunnel packet if the hardware does not support
+ * it, because the hardware cannot calculate the checksum
+ * when doing TSO.
+ */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+ /* the stack has already computed the IP checksum; the
+ * driver calculates the L4 checksum when not doing TSO.
+ */
+ return skb_checksum_help(skb);
+ }
+
+ hns3_set_outer_l2l3l4(skb, ol4_proto, ol_type_vlan_len_msec);
+
+ /* switch to inner header */
+ l2_hdr = skb_inner_mac_header(skb);
+ l3.hdr = skb_inner_network_header(skb);
+ l4.hdr = skb_inner_transport_header(skb);
+ l4_proto = il4_proto;
+ }
+
+ if (l3.v4->version == 4) {
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
+ HNS3_L3T_IPV4);
+
+ /* the stack has already computed the IP header checksum; the only
+ * time we need the hardware to recompute it is for TSO.
+ */
+ if (skb_is_gso(skb))
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
+ } else if (l3.v6->version == 6) {
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_S,
+ HNS3_L3T_IPV6);
+ }
+
+ /* compute inner (or normal) L2 header size, in units of 2 bytes */
+ l2_len = l3.hdr - l2_hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_S, l2_len >> 1);
+
+ /* compute inner (or normal) L3 header size, in units of 4 bytes */
+ l3_len = l4.hdr - l3.hdr;
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_S, l3_len >> 2);
+
+ /* compute inner (or normal) L4 header size, in units of 4 bytes */
+ switch (l4_proto) {
+ case IPPROTO_TCP:
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_TCP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ l4.tcp->doff);
+ break;
+ case IPPROTO_UDP:
+ if (hns3_tunnel_csum_bug(skb)) {
+ int ret = skb_put_padto(skb, HNS3_MIN_TUN_PKT_LEN);
+
+ return ret ? ret : skb_checksum_help(skb);
+ }
+
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_UDP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct udphdr) >> 2));
+ break;
+ case IPPROTO_SCTP:
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4T_S,
+ HNS3_L4T_SCTP);
+ hns3_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_S,
+ (sizeof(struct sctphdr) >> 2));
+ break;
+ default:
+ /* drop the packet if the hardware does not support its L4
+ * protocol, because the hardware cannot calculate the checksum
+ * when doing TSO.
+ */
+ if (skb_is_gso(skb))
+ return -EDOM;
+
+ /* the stack has already computed the IP checksum; the
+ * driver calculates the L4 checksum when not doing TSO.
+ */
+ return skb_checksum_help(skb);
+ }
+
+ return 0;
+}
+
+static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring,
+ struct sk_buff *skb)
+{
+ struct hnae3_handle *handle = tx_ring->tqp->handle;
+ struct vlan_ethhdr *vhdr;
+ int rc;
+
+ if (!(skb->protocol == htons(ETH_P_8021Q) ||
+ skb_vlan_tag_present(skb)))
+ return 0;
+
+ /* Due to a hardware limitation, when port-based VLAN insertion is
+ * enabled only one VLAN header is allowed in the skb, otherwise it
+ * will cause a RAS error.
+ */
+ if (unlikely(skb_vlan_tagged_multi(skb) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_ENABLE))
+ return -EINVAL;
+
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) {
+ /* When HW VLAN acceleration is turned off and the stack
+ * sets the protocol to 802.1q, the driver just needs to
+ * set the protocol to the encapsulated ethertype.
+ */
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ /* Per the hardware strategy, use out_vtag in the two-layer
+ * tag case and inner_vtag in the single-tag case.
+ */
+ if (skb->protocol == htons(ETH_P_8021Q) &&
+ handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ rc = HNS3_OUTER_VLAN_TAG;
+ else
+ rc = HNS3_INNER_VLAN_TAG;
+
+ skb->protocol = vlan_get_protocol(skb);
+ return rc;
+ }
+
+ rc = skb_cow_head(skb, 0);
+ if (unlikely(rc < 0))
+ return rc;
+
+ vhdr = skb_vlan_eth_hdr(skb);
+ vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT)
+ & VLAN_PRIO_MASK);
+
+ skb->protocol = vlan_get_protocol(skb);
+ return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, struct hns3_desc *desc)
+{
+ u32 ol_type_vlan_len_msec = 0;
+ u32 type_cs_vlan_tso = 0;
+ u32 paylen = skb->len;
+ u16 inner_vtag = 0;
+ u16 out_vtag = 0;
+ u16 mss = 0;
+ int ret;
+
+ ret = hns3_handle_vtags(ring, skb);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_vlan_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ } else if (ret == HNS3_INNER_VLAN_TAG) {
+ inner_vtag = skb_vlan_tag_get(skb);
+ inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+ } else if (ret == HNS3_OUTER_VLAN_TAG) {
+ out_vtag = skb_vlan_tag_get(skb);
+ out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+ VLAN_PRIO_MASK;
+ hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+ 1);
+ }
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ u8 ol4_proto, il4_proto;
+
+ skb_reset_mac_len(skb);
+
+ ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l4_proto_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+ &type_cs_vlan_tso,
+ &ol_type_vlan_len_msec);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_l2l3l4_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ ret = hns3_set_tso(skb, &paylen, &mss,
+ &type_cs_vlan_tso);
+ if (unlikely(ret < 0)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_tso_err++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+ }
+
+ /* Set txbd */
+ desc->tx.ol_type_vlan_len_msec =
+ cpu_to_le32(ol_type_vlan_len_msec);
+ desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
+ desc->tx.paylen = cpu_to_le32(paylen);
+ desc->tx.mss = cpu_to_le16(mss);
+ desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
+ desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+
+ return 0;
+}
+
+static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
+ unsigned int size, enum hns_desc_type type)
+{
+#define HNS3_LIKELY_BD_NUM 1
+
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+ struct device *dev = ring_to_dev(ring);
+ skb_frag_t *frag;
+ unsigned int frag_buf_num;
+ int k, sizeoflast;
+ dma_addr_t dma;
+
+ if (type == DESC_TYPE_FRAGLIST_SKB ||
+ type == DESC_TYPE_SKB) {
+ struct sk_buff *skb = (struct sk_buff *)priv;
+
+ dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+ } else {
+ frag = (skb_frag_t *)priv;
+ dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+ }
+
+ if (unlikely(dma_mapping_error(dev, dma))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ desc_cb->priv = priv;
+ desc_cb->length = size;
+ desc_cb->dma = dma;
+ desc_cb->type = type;
+
+ if (likely(size <= HNS3_MAX_BD_SIZE)) {
+ desc->addr = cpu_to_le64(dma);
+ desc->tx.send_size = cpu_to_le16(size);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+
+ trace_hns3_tx_desc(ring, ring->next_to_use);
+ ring_ptr_move_fw(ring, next_to_use);
+ return HNS3_LIKELY_BD_NUM;
+ }
+
+ frag_buf_num = hns3_tx_bd_count(size);
+ sizeoflast = size % HNS3_MAX_BD_SIZE;
+ sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
+
+ /* When the frag size is bigger than the hardware limit, split this frag */
+ for (k = 0; k < frag_buf_num; k++) {
+ /* now, fill the descriptor */
+ desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+ desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
+ (u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+ desc->tx.bdtp_fe_sc_vld_ra_ri =
+ cpu_to_le16(BIT(HNS3_TXD_VLD_B));
+
+ trace_hns3_tx_desc(ring, ring->next_to_use);
+ /* move ring pointer to next */
+ ring_ptr_move_fw(ring, next_to_use);
+
+ desc = &ring->desc[ring->next_to_use];
+ }
+
+ return frag_buf_num;
+}
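+
+ /* Illustrative userspace-style sketch (not part of this driver) of the
+ * splitting loop above: a buffer larger than the per-BD limit (assumed to
+ * be 65535 bytes, matching HNS3_MAX_BD_SIZE) is carved into full-size BDs
+ * plus one trailing BD holding the remainder.
+ */
+ #include <assert.h>
+
+ #define SKETCH_MAX_BD_SIZE 65535U /* assumed per-BD hardware limit */
+
+ static unsigned int sketch_bd_count(unsigned int size)
+ {
+ return (size + SKETCH_MAX_BD_SIZE - 1) / SKETCH_MAX_BD_SIZE;
+ }
+
+ static unsigned int sketch_last_bd_size(unsigned int size)
+ {
+ unsigned int rem = size % SKETCH_MAX_BD_SIZE;
+
+ return rem ? rem : SKETCH_MAX_BD_SIZE;
+ }
+
+ int main(void)
+ {
+ assert(sketch_bd_count(100000) == 2);
+ assert(sketch_last_bd_size(100000) == 100000 - 65535);
+ return 0;
+ }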
+
+static unsigned int hns3_skb_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num)
+{
+ unsigned int size;
+ int i;
+
+ size = skb_headlen(skb);
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ if (size) {
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ while (size > HNS3_MAX_BD_SIZE) {
+ bd_size[bd_num++] = HNS3_MAX_BD_SIZE;
+ size -= HNS3_MAX_BD_SIZE;
+
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ bd_size[bd_num++] = size;
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
+static unsigned int hns3_tx_bd_num(struct sk_buff *skb, unsigned int *bd_size,
+ u8 max_non_tso_bd_num, unsigned int bd_num,
+ unsigned int recursion_level)
+{
+#define HNS3_MAX_RECURSION_LEVEL 24
+
+ struct sk_buff *frag_skb;
+
+ /* If the total len is within the max bd limit */
+ if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level &&
+ !skb_has_frag_list(skb) &&
+ skb_shinfo(skb)->nr_frags < max_non_tso_bd_num))
+ return skb_shinfo(skb)->nr_frags + 1U;
+
+ if (unlikely(recursion_level >= HNS3_MAX_RECURSION_LEVEL))
+ return UINT_MAX;
+
+ bd_num = hns3_skb_bd_num(skb, bd_size, bd_num);
+
+ if (!skb_has_frag_list(skb) || bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+
+ skb_walk_frags(skb, frag_skb) {
+ bd_num = hns3_tx_bd_num(frag_skb, bd_size, max_non_tso_bd_num,
+ bd_num, recursion_level + 1);
+ if (bd_num > HNS3_MAX_TSO_BD_NUM)
+ return bd_num;
+ }
+
+ return bd_num;
+}
+
+static unsigned int hns3_gso_hdr_len(struct sk_buff *skb)
+{
+ if (!skb->encapsulation)
+ return skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ return skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb);
+}
+
+ /* The hardware needs the data in every window of max_non_tso_bd_num
+ * consecutive buffers to be larger than the MSS. We simplify this by
+ * ensuring that skb_headlen plus the first max_non_tso_bd_num - 1 frags
+ * exceed the GSO header length plus the MSS, and that every following
+ * window of max_non_tso_bd_num - 1 consecutive frags exceeds the MSS,
+ * except for the last max_non_tso_bd_num - 1 frags.
+ */
+static bool hns3_skb_need_linearized(struct sk_buff *skb, unsigned int *bd_size,
+ unsigned int bd_num, u8 max_non_tso_bd_num)
+{
+ unsigned int tot_len = 0;
+ int i;
+
+ for (i = 0; i < max_non_tso_bd_num - 1U; i++)
+ tot_len += bd_size[i];
+
+ /* ensure the first max_non_tso_bd_num buffers are greater than
+ * mss + header
+ */
+ if (tot_len + bd_size[max_non_tso_bd_num - 1U] <
+ skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb))
+ return true;
+
+ /* ensure every window of max_non_tso_bd_num - 1 consecutive buffers
+ * is greater than the mss, except for the last one.
+ */
+ for (i = 0; i < bd_num - max_non_tso_bd_num; i++) {
+ tot_len -= bd_size[i];
+ tot_len += bd_size[i + max_non_tso_bd_num - 1U];
+
+ if (tot_len < skb_shinfo(skb)->gso_size)
+ return true;
+ }
+
+ return false;
+}
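+
+ /* Illustrative sketch (not part of this driver) of the sliding-window
+ * check above: the first window must cover the GSO header plus the MSS,
+ * and every later window of win - 1 consecutive buffers must cover the
+ * MSS; bd[], win, mss and hdr are hypothetical stand-ins for the driver's
+ * bd_size[], max_non_tso_bd_num, gso_size and GSO header length.
+ */
+ #include <stdbool.h>
+
+ static bool sketch_need_linearize(const unsigned int *bd, int n, int win,
+ unsigned int mss, unsigned int hdr)
+ {
+ unsigned int tot = 0;
+ int i;
+
+ for (i = 0; i < win - 1; i++)
+ tot += bd[i];
+
+ if (tot + bd[win - 1] < mss + hdr)
+ return true;
+
+ for (i = 0; i < n - win; i++) {
+ tot -= bd[i];
+ tot += bd[i + win - 1];
+ if (tot < mss)
+ return true;
+ }
+
+ return false;
+ }
+
+ int main(void)
+ {
+ unsigned int bd[] = { 2000, 2000, 2000, 2000, 2000 };
+
+ /* win = 3, mss = 1400, hdr = 54: every window easily covers the MSS */
+ return sketch_need_linearize(bd, 5, 3, 1400, 54) ? 1 : 0;
+ }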
+
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size)
+{
+ int i;
+
+ for (i = 0; i < MAX_SKB_FRAGS; i++)
+ size[i] = skb_frag_size(&shinfo->frags[i]);
+}
+
+static int hns3_skb_linearize(struct hns3_enet_ring *ring,
+ struct sk_buff *skb,
+ unsigned int bd_num)
+{
+ /* 'bd_num == UINT_MAX' means the skb's fraglist has a
+ * recursion level of over HNS3_MAX_RECURSION_LEVEL.
+ */
+ if (bd_num == UINT_MAX) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.over_max_recursion++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ /* skb->len has exceeded the hardware limitation; linearization
+ * will not help.
+ */
+ if (skb->len > HNS3_MAX_TSO_SIZE ||
+ (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.hw_limitation++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ if (__skb_linearize(skb)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
+ struct net_device *netdev,
+ struct sk_buff *skb)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ u8 max_non_tso_bd_num = priv->max_non_tso_bd_num;
+ unsigned int bd_size[HNS3_MAX_TSO_BD_NUM + 1U];
+ unsigned int bd_num;
+
+ bd_num = hns3_tx_bd_num(skb, bd_size, max_non_tso_bd_num, 0, 0);
+ if (unlikely(bd_num > max_non_tso_bd_num)) {
+ if (bd_num <= HNS3_MAX_TSO_BD_NUM && skb_is_gso(skb) &&
+ !hns3_skb_need_linearized(skb, bd_size, bd_num,
+ max_non_tso_bd_num)) {
+ trace_hns3_over_max_bd(skb);
+ goto out;
+ }
+
+ if (hns3_skb_linearize(ring, skb, bd_num))
+ return -ENOMEM;
+
+ bd_num = hns3_tx_bd_count(skb->len);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_copy++;
+ u64_stats_update_end(&ring->syncp);
+ }
+
+out:
+ if (likely(ring_space(ring) >= bd_num))
+ return bd_num;
+
+ netif_stop_subqueue(netdev, ring->queue_index);
+ smp_mb(); /* Memory barrier before checking ring_space */
+
+ /* Start the queue in case hns3_clean_tx_ring has just made room
+ * available and has not yet seen the stopped state set by
+ * netif_stop_subqueue above.
+ */
+ if (ring_space(ring) >= bd_num && netif_carrier_ok(netdev) &&
+ !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netif_start_subqueue(netdev, ring->queue_index);
+ return bd_num;
+ }
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_busy++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EBUSY;
+}
+
+static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
+{
+ struct device *dev = ring_to_dev(ring);
+ unsigned int i;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+
+ memset(desc, 0, sizeof(*desc));
+
+ /* check if this is where we started */
+ if (ring->next_to_use == next_to_use_orig)
+ break;
+
+ /* rollback one */
+ ring_ptr_move_bw(ring, next_to_use);
+
+ if (!ring->desc_cb[ring->next_to_use].dma)
+ continue;
+
+ /* unmap the descriptor dma address */
+ if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB ||
+ ring->desc_cb[ring->next_to_use].type ==
+ DESC_TYPE_FRAGLIST_SKB)
+ dma_unmap_single(dev,
+ ring->desc_cb[ring->next_to_use].dma,
+ ring->desc_cb[ring->next_to_use].length,
+ DMA_TO_DEVICE);
+ else if (ring->desc_cb[ring->next_to_use].length)
+ dma_unmap_page(dev,
+ ring->desc_cb[ring->next_to_use].dma,
+ ring->desc_cb[ring->next_to_use].length,
+ DMA_TO_DEVICE);
+
+ ring->desc_cb[ring->next_to_use].length = 0;
+ ring->desc_cb[ring->next_to_use].dma = 0;
+ ring->desc_cb[ring->next_to_use].type = DESC_TYPE_UNKNOWN;
+ }
+}
+
+static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, enum hns_desc_type type)
+{
+ unsigned int size = skb_headlen(skb);
+ struct sk_buff *frag_skb;
+ int i, ret, bd_num = 0;
+
+ if (size) {
+ ret = hns3_fill_desc(ring, skb, size, type);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ size = skb_frag_size(frag);
+ if (!size)
+ continue;
+
+ ret = hns3_fill_desc(ring, frag, size, DESC_TYPE_PAGE);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ skb_walk_frags(skb, frag_skb) {
+ ret = hns3_fill_skb_to_desc(ring, frag_skb,
+ DESC_TYPE_FRAGLIST_SKB);
+ if (unlikely(ret < 0))
+ return ret;
+
+ bd_num += ret;
+ }
+
+ return bd_num;
+}
+
+static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
+ bool doorbell)
+{
+ ring->pending_buf += num;
+
+ if (!doorbell) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_more++;
+ u64_stats_update_end(&ring->syncp);
+ return;
+ }
+
+ if (!ring->pending_buf)
+ return;
+
+ writel(ring->pending_buf,
+ ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
+ ring->pending_buf = 0;
+ WRITE_ONCE(ring->last_to_use, ring->next_to_use);
+}
+
+netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
+ struct netdev_queue *dev_queue;
+ int pre_ntu, next_to_use_head;
+ bool doorbell;
+ int ret;
+
+ /* The hardware can only handle frames longer than 32 bytes, so pad short frames */
+ if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
+ return NETDEV_TX_OK;
+ }
+
+ /* Prefetch the data used later */
+ prefetch(skb->data);
+
+ ret = hns3_nic_maybe_stop_tx(ring, netdev, skb);
+ if (unlikely(ret <= 0)) {
+ if (ret == -EBUSY) {
+ hns3_tx_doorbell(ring, 0, true);
+ return NETDEV_TX_BUSY;
+ }
+
+ hns3_rl_err(netdev, "xmit error: %d!\n", ret);
+ goto out_err_tx_ok;
+ }
+
+ next_to_use_head = ring->next_to_use;
+
+ ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
+ if (unlikely(ret < 0))
+ goto fill_err;
+
+ /* 'ret < 0' means a filling error, 'ret == 0' means skb->len is
+ * zero (which is unlikely), and 'ret > 0' is the number of tx
+ * descriptors that need to be notified to the hw.
+ */
+ ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
+ if (unlikely(ret <= 0))
+ goto fill_err;
+
+ pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
+ (ring->desc_num - 1);
+ ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |=
+ cpu_to_le16(BIT(HNS3_TXD_FE_B));
+ trace_hns3_tx_desc(ring, pre_ntu);
+
+ /* The packet has been fully translated into descriptors */
+ dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
+ doorbell = __netdev_tx_sent_queue(dev_queue, skb->len,
+ netdev_xmit_more());
+ hns3_tx_doorbell(ring, ret, doorbell);
+
+ return NETDEV_TX_OK;
+
+fill_err:
+ hns3_clear_desc(ring, next_to_use_head);
+
+out_err_tx_ok:
+ dev_kfree_skb_any(skb);
+ hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
+ return NETDEV_TX_OK;
+}
+
+static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct sockaddr *mac_addr = p;
+ int ret;
+
+ if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) {
+ netdev_info(netdev, "already using mac address %pM\n",
+ mac_addr->sa_data);
+ return 0;
+ }
+
+ /* For a VF device, if there is a perm_addr, the user is not
+ * allowed to change the address.
+ */
+ if (!hns3_is_phys_func(h->pdev) &&
+ !is_zero_ether_addr(netdev->perm_addr)) {
+ netdev_err(netdev, "has permanent MAC %pM, user MAC %pM not allow\n",
+ netdev->perm_addr, mac_addr->sa_data);
+ return -EPERM;
+ }
+
+ ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
+ if (ret) {
+ netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
+ return ret;
+ }
+
+ ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
+
+ return 0;
+}
+
+static int hns3_nic_do_ioctl(struct net_device *netdev,
+ struct ifreq *ifr, int cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!netif_running(netdev))
+ return -EINVAL;
+
+ if (!h->ae_algo->ops->do_ioctl)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->do_ioctl(h, ifr, cmd);
+}
+
+static int hns3_nic_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = netdev->features ^ features;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ bool enable;
+ int ret;
+
+ if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
+ enable = !!(features & NETIF_F_GRO_HW);
+ ret = h->ae_algo->ops->set_gro_en(h, enable);
+ if (ret)
+ return ret;
+ }
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
+ h->ae_algo->ops->enable_hw_strip_rxvtag) {
+ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX);
+ ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable);
+ if (ret)
+ return ret;
+ }
+
+ if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) {
+ enable = !!(features & NETIF_F_NTUPLE);
+ h->ae_algo->ops->enable_fd(h, enable);
+ }
+
+ netdev->features = features;
+ return 0;
+}
+
+static netdev_features_t hns3_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+#define HNS3_MAX_HDR_LEN 480U
+#define HNS3_MAX_L4_HDR_LEN 60U
+
+ size_t len;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return features;
+
+ if (skb->encapsulation)
+ len = skb_inner_transport_header(skb) - skb->data;
+ else
+ len = skb_transport_header(skb) - skb->data;
+
+ /* Assume the L4 header is 60 bytes, as TCP is the only protocol with
+ * a flexible header length, and its maximum length is 60 bytes.
+ */
+ len += HNS3_MAX_L4_HDR_LEN;
+
+ /* Hardware only supports checksum offload on skbs with a header
+ * length of at most 480 bytes.
+ */
+ if (len > HNS3_MAX_HDR_LEN)
+ features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
+
+ return features;
+}
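+
+ /* Illustrative sketch (not part of this driver) of the header budget check
+ * above: the L2 + L3 offset plus an assumed worst-case 60-byte L4 header
+ * must stay within the 480-byte limit for the hardware checksum/GSO
+ * offload to remain enabled.
+ */
+ #include <assert.h>
+ #include <stdbool.h>
+
+ static bool sketch_hdr_fits_offload(unsigned int l4_offset)
+ {
+ return l4_offset + 60U <= 480U;
+ }
+
+ int main(void)
+ {
+ assert(sketch_hdr_fits_offload(14 + 20)); /* plain IPv4 over Ethernet */
+ assert(!sketch_hdr_fits_offload(430)); /* 430 + 60 exceeds 480 */
+ return 0;
+ }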
+
+static void hns3_nic_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *ring;
+ u64 rx_length_errors = 0;
+ u64 rx_crc_errors = 0;
+ u64 rx_multicast = 0;
+ unsigned int start;
+ u64 tx_errors = 0;
+ u64 rx_errors = 0;
+ unsigned int idx;
+ u64 tx_bytes = 0;
+ u64 rx_bytes = 0;
+ u64 tx_pkts = 0;
+ u64 rx_pkts = 0;
+ u64 tx_drop = 0;
+ u64 rx_drop = 0;
+
+ if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
+ return;
+
+ handle->ae_algo->ops->update_stats(handle, &netdev->stats);
+
+ for (idx = 0; idx < queue_num; idx++) {
+ /* fetch the tx stats */
+ ring = &priv->ring[idx];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ tx_bytes += ring->stats.tx_bytes;
+ tx_pkts += ring->stats.tx_pkts;
+ tx_drop += ring->stats.sw_err_cnt;
+ tx_drop += ring->stats.tx_vlan_err;
+ tx_drop += ring->stats.tx_l4_proto_err;
+ tx_drop += ring->stats.tx_l2l3l4_err;
+ tx_drop += ring->stats.tx_tso_err;
+ tx_drop += ring->stats.over_max_recursion;
+ tx_drop += ring->stats.hw_limitation;
+ tx_errors += ring->stats.sw_err_cnt;
+ tx_errors += ring->stats.tx_vlan_err;
+ tx_errors += ring->stats.tx_l4_proto_err;
+ tx_errors += ring->stats.tx_l2l3l4_err;
+ tx_errors += ring->stats.tx_tso_err;
+ tx_errors += ring->stats.over_max_recursion;
+ tx_errors += ring->stats.hw_limitation;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+
+ /* fetch the rx stats */
+ ring = &priv->ring[idx + queue_num];
+ do {
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ rx_bytes += ring->stats.rx_bytes;
+ rx_pkts += ring->stats.rx_pkts;
+ rx_drop += ring->stats.l2_err;
+ rx_errors += ring->stats.l2_err;
+ rx_errors += ring->stats.l3l4_csum_err;
+ rx_crc_errors += ring->stats.l2_err;
+ rx_multicast += ring->stats.rx_multicast;
+ rx_length_errors += ring->stats.err_pkt_len;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
+ }
+
+ stats->tx_bytes = tx_bytes;
+ stats->tx_packets = tx_pkts;
+ stats->rx_bytes = rx_bytes;
+ stats->rx_packets = rx_pkts;
+
+ stats->rx_errors = rx_errors;
+ stats->multicast = rx_multicast;
+ stats->rx_length_errors = rx_length_errors;
+ stats->rx_crc_errors = rx_crc_errors;
+ stats->rx_missed_errors = netdev->stats.rx_missed_errors;
+
+ stats->tx_errors = tx_errors;
+ stats->rx_dropped = rx_drop;
+ stats->tx_dropped = tx_drop;
+ stats->collisions = netdev->stats.collisions;
+ stats->rx_over_errors = netdev->stats.rx_over_errors;
+ stats->rx_frame_errors = netdev->stats.rx_frame_errors;
+ stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
+ stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
+ stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
+ stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
+ stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
+ stats->tx_window_errors = netdev->stats.tx_window_errors;
+ stats->rx_compressed = netdev->stats.rx_compressed;
+ stats->tx_compressed = netdev->stats.tx_compressed;
+}
+
+static int hns3_setup_tc(struct net_device *netdev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
+ u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
+ struct hnae3_knic_private_info *kinfo;
+ u8 tc = mqprio_qopt->qopt.num_tc;
+ u16 mode = mqprio_qopt->mode;
+ u8 hw = mqprio_qopt->qopt.hw;
+ struct hnae3_handle *h;
+
+ if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
+ mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
+ return -EOPNOTSUPP;
+
+ if (tc > HNAE3_MAX_TC)
+ return -EINVAL;
+
+ if (!netdev)
+ return -EINVAL;
+
+ h = hns3_get_handle(netdev);
+ kinfo = &h->kinfo;
+
+ netif_dbg(h, drv, netdev, "setup tc: num_tc=%u\n", tc);
+
+ return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
+ kinfo->dcb_ops->setup_tc(h, tc ? tc : 1, prio_tc) : -EOPNOTSUPP;
+}
+
+static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ return hns3_setup_tc(dev, type_data);
+}
+
+static int hns3_vlan_rx_add_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = -EIO;
+
+ if (h->ae_algo->ops->set_vlan_filter)
+ ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
+
+ return ret;
+}
+
+static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
+ __be16 proto, u16 vid)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = -EIO;
+
+ if (h->ae_algo->ops->set_vlan_filter)
+ ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
+
+ return ret;
+}
+
+static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
+ u8 qos, __be16 vlan_proto)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = -EIO;
+
+ netif_dbg(h, drv, netdev,
+ "set vf vlan: vf=%d, vlan=%u, qos=%u, vlan_proto=0x%x\n",
+ vf, vlan, qos, ntohs(vlan_proto));
+
+ if (h->ae_algo->ops->set_vf_vlan_filter)
+ ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
+ qos, vlan_proto);
+
+ return ret;
+}
+
+static int hns3_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!handle->ae_algo->ops->set_vf_spoofchk)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable);
+}
+
+static int hns3_set_vf_trust(struct net_device *netdev, int vf, bool enable)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+
+ if (!handle->ae_algo->ops->set_vf_trust)
+ return -EOPNOTSUPP;
+
+ return handle->ae_algo->ops->set_vf_trust(handle, vf, enable);
+}
+
+static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (!h->ae_algo->ops->set_mtu)
+ return -EOPNOTSUPP;
+
+ netif_dbg(h, drv, netdev,
+ "change mtu from %u to %d\n", netdev->mtu, new_mtu);
+
+ ret = h->ae_algo->ops->set_mtu(h, new_mtu);
+ if (ret)
+ netdev_err(netdev, "failed to change MTU in hardware %d\n",
+ ret);
+ else
+ netdev->mtu = new_mtu;
+
+ return ret;
+}
+
+static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hns3_enet_ring *tx_ring;
+ struct napi_struct *napi;
+ int timeout_queue = 0;
+ int hw_head, hw_tail;
+ int fbd_num, fbd_oft;
+ int ebd_num, ebd_oft;
+ int bd_num, bd_err;
+ int ring_en, tc;
+ int i;
+
+ /* Find the stopped queue the same way the stack does */
+ for (i = 0; i < ndev->num_tx_queues; i++) {
+ struct netdev_queue *q;
+ unsigned long trans_start;
+
+ q = netdev_get_tx_queue(ndev, i);
+ trans_start = q->trans_start;
+ if (netif_xmit_stopped(q) &&
+ time_after(jiffies,
+ (trans_start + ndev->watchdog_timeo))) {
+ timeout_queue = i;
+ netdev_info(ndev, "queue state: 0x%lx, delta msecs: %u\n",
+ q->state,
+ jiffies_to_msecs(jiffies - trans_start));
+ break;
+ }
+ }
+
+ if (i == ndev->num_tx_queues) {
+ netdev_info(ndev,
+ "no netdev TX timeout queue found, timeout count: %llu\n",
+ priv->tx_timeout_count);
+ return false;
+ }
+
+ priv->tx_timeout_count++;
+
+ tx_ring = &priv->ring[timeout_queue];
+ napi = &tx_ring->tqp_vector->napi;
+
+ netdev_info(ndev,
+ "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, napi state: %lu\n",
+ priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use,
+ tx_ring->next_to_clean, napi->state);
+
+ netdev_info(ndev,
+ "tx_pkts: %llu, tx_bytes: %llu, sw_err_cnt: %llu, tx_pending: %d\n",
+ tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes,
+ tx_ring->stats.sw_err_cnt, tx_ring->pending_buf);
+
+ netdev_info(ndev,
+ "seg_pkt_cnt: %llu, tx_more: %llu, restart_queue: %llu, tx_busy: %llu\n",
+ tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more,
+ tx_ring->stats.restart_queue, tx_ring->stats.tx_busy);
+
+ /* When the MAC receives many pause frames continuously, it is unable to
+ * send packets, which may cause a tx timeout
+ */
+ if (h->ae_algo->ops->get_mac_stats) {
+ struct hns3_mac_stats mac_stats;
+
+ h->ae_algo->ops->get_mac_stats(h, &mac_stats);
+ netdev_info(ndev, "tx_pause_cnt: %llu, rx_pause_cnt: %llu\n",
+ mac_stats.tx_pause_cnt, mac_stats.rx_pause_cnt);
+ }
+
+ hw_head = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_HEAD_REG);
+ hw_tail = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_TAIL_REG);
+ fbd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_FBDNUM_REG);
+ fbd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_OFFSET_REG);
+ ebd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBDNUM_REG);
+ ebd_oft = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_EBD_OFFSET_REG);
+ bd_num = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_NUM_REG);
+ bd_err = readl_relaxed(tx_ring->tqp->io_base +
+ HNS3_RING_TX_RING_BD_ERR_REG);
+ ring_en = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_EN_REG);
+ tc = readl_relaxed(tx_ring->tqp->io_base + HNS3_RING_TX_RING_TC_REG);
+
+ netdev_info(ndev,
+ "BD_NUM: 0x%x HW_HEAD: 0x%x, HW_TAIL: 0x%x, BD_ERR: 0x%x, INT: 0x%x\n",
+ bd_num, hw_head, hw_tail, bd_err,
+ readl(tx_ring->tqp_vector->mask_addr));
+ netdev_info(ndev,
+ "RING_EN: 0x%x, TC: 0x%x, FBD_NUM: 0x%x FBD_OFT: 0x%x, EBD_NUM: 0x%x, EBD_OFT: 0x%x\n",
+ ring_en, tc, fbd_num, fbd_oft, ebd_num, ebd_oft);
+
+ return true;
+}
+
+static void hns3_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (!hns3_get_tx_timeo_queue_info(ndev))
+ return;
+
+ /* request the reset and let hclge determine
+ * which reset level should be performed
+ */
+ if (h->ae_algo->ops->reset_event)
+ h->ae_algo->ops->reset_event(h->pdev, h);
+}
+
+#ifdef CONFIG_RFS_ACCEL
+static int hns3_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+ u16 rxq_index, u32 flow_id)
+{
+ struct hnae3_handle *h = hns3_get_handle(dev);
+ struct flow_keys fkeys;
+
+ if (!h->ae_algo->ops->add_arfs_entry)
+ return -EOPNOTSUPP;
+
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
+ if (!skb_flow_dissect_flow_keys(skb, &fkeys, 0))
+ return -EPROTONOSUPPORT;
+
+ if ((fkeys.basic.n_proto != htons(ETH_P_IP) &&
+ fkeys.basic.n_proto != htons(ETH_P_IPV6)) ||
+ (fkeys.basic.ip_proto != IPPROTO_TCP &&
+ fkeys.basic.ip_proto != IPPROTO_UDP))
+ return -EPROTONOSUPPORT;
+
+ return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys);
+}
+#endif
+
+static int hns3_nic_get_vf_config(struct net_device *ndev, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->get_vf_config)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_vf_config(h, vf, ivf);
+}
+
+static int hns3_nic_set_vf_link_state(struct net_device *ndev, int vf,
+ int link_state)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_link_state)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_link_state(h, vf, link_state);
+}
+
+static int hns3_nic_set_vf_rate(struct net_device *ndev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+
+ if (!h->ae_algo->ops->set_vf_rate)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate,
+ false);
+}
+
+static int hns3_nic_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->set_vf_mac)
+ return -EOPNOTSUPP;
+
+ if (is_multicast_ether_addr(mac)) {
+ netdev_err(netdev,
+ "Invalid MAC:%pM specified. Could not set MAC\n",
+ mac);
+ return -EINVAL;
+ }
+
+ return h->ae_algo->ops->set_vf_mac(h, vf_id, mac);
+}
+
+static const struct net_device_ops hns3_nic_netdev_ops = {
+ .ndo_open = hns3_nic_net_open,
+ .ndo_stop = hns3_nic_net_stop,
+ .ndo_start_xmit = hns3_nic_net_xmit,
+ .ndo_tx_timeout = hns3_nic_net_timeout,
+ .ndo_set_mac_address = hns3_nic_net_set_mac_address,
+ .ndo_do_ioctl = hns3_nic_do_ioctl,
+ .ndo_change_mtu = hns3_nic_change_mtu,
+ .ndo_set_features = hns3_nic_set_features,
+ .ndo_features_check = hns3_features_check,
+ .ndo_get_stats64 = hns3_nic_get_stats64,
+ .ndo_setup_tc = hns3_nic_setup_tc,
+ .ndo_set_rx_mode = hns3_nic_set_rx_mode,
+ .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
+ .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
+ .ndo_set_vf_spoofchk = hns3_set_vf_spoofchk,
+ .ndo_set_vf_trust = hns3_set_vf_trust,
+#ifdef CONFIG_RFS_ACCEL
+ .ndo_rx_flow_steer = hns3_rx_flow_steer,
+#endif
+ .ndo_get_vf_config = hns3_nic_get_vf_config,
+ .ndo_set_vf_link_state = hns3_nic_set_vf_link_state,
+ .ndo_set_vf_rate = hns3_nic_set_vf_rate,
+ .ndo_set_vf_mac = hns3_nic_set_vf_mac,
+};
+
+bool hns3_is_phys_func(struct pci_dev *pdev)
+{
+ u32 dev_id = pdev->device;
+
+ switch (dev_id) {
+ case HNAE3_DEV_ID_GE:
+ case HNAE3_DEV_ID_25GE:
+ case HNAE3_DEV_ID_25GE_RDMA:
+ case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_50GE_RDMA:
+ case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
+ case HNAE3_DEV_ID_100G_RDMA_MACSEC:
+ case HNAE3_DEV_ID_200G_RDMA:
+ return true;
+ case HNAE3_DEV_ID_VF:
+ case HNAE3_DEV_ID_RDMA_DCB_PFC_VF:
+ return false;
+ default:
+ dev_warn(&pdev->dev, "un-recognized pci device-id %u",
+ dev_id);
+ }
+
+ return false;
+}
+
+static void hns3_disable_sriov(struct pci_dev *pdev)
+{
+ /* If our VFs are assigned we cannot shut down SR-IOV
+ * without causing issues, so just leave the hardware
+ * available but disabled
+ */
+ if (pci_vfs_assigned(pdev)) {
+ dev_warn(&pdev->dev,
+ "disabling driver while VFs are assigned\n");
+ return;
+ }
+
+ pci_disable_sriov(pdev);
+}
+
+/* hns3_probe - Device initialization routine
+ * @pdev: PCI device information struct
+ * @ent: entry in hns3_pci_tbl
+ *
+ * hns3_probe initializes a PF identified by a pci_dev structure.
+ * The OS initialization, configuring of the PF private structure,
+ * and a hardware reset occur.
+ *
+ * Returns 0 on success, negative on failure
+ */
+static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct hnae3_ae_dev *ae_dev;
+ int ret;
+
+ ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL);
+ if (!ae_dev)
+ return -ENOMEM;
+
+ ae_dev->pdev = pdev;
+ ae_dev->flag = ent->driver_data;
+ pci_set_drvdata(pdev, ae_dev);
+
+ ret = hnae3_register_ae_dev(ae_dev);
+ if (ret)
+ pci_set_drvdata(pdev, NULL);
+
+ return ret;
+}
+
+/* hns3_remove - Device removal routine
+ * @pdev: PCI device information struct
+ */
+static void hns3_remove(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
+ hns3_disable_sriov(pdev);
+
+ hnae3_unregister_ae_dev(ae_dev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+/**
+ * hns3_pci_sriov_configure
+ * @pdev: pointer to a pci_dev structure
+ * @num_vfs: number of VFs to allocate
+ *
+ * Enable or change the number of VFs. Called when the user updates the number
+ * of VFs in sysfs.
+ **/
+static int hns3_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
+{
+ int ret;
+
+ if (!(hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))) {
+ dev_warn(&pdev->dev, "Can not config SRIOV\n");
+ return -EINVAL;
+ }
+
+ if (num_vfs) {
+ ret = pci_enable_sriov(pdev, num_vfs);
+ if (ret)
+ dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
+ else
+ return num_vfs;
+ } else if (!pci_vfs_assigned(pdev)) {
+ pci_disable_sriov(pdev);
+ } else {
+ dev_warn(&pdev->dev,
+ "Unable to free VFs because some are assigned to VMs.\n");
+ }
+
+ return 0;
+}
+
+static void hns3_shutdown(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ hnae3_unregister_ae_dev(ae_dev);
+ pci_set_drvdata(pdev, NULL);
+
+ if (system_state == SYSTEM_POWER_OFF)
+ pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev,
+ pci_channel_state_t state)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ pci_ers_result_t ret;
+
+ dev_info(&pdev->dev, "PCI error detected, state(=%d)!!\n", state);
+
+ if (state == pci_channel_io_perm_failure)
+ return PCI_ERS_RESULT_DISCONNECT;
+
+ if (!ae_dev || !ae_dev->ops) {
+ dev_err(&pdev->dev,
+ "Can't recover - error happened before device initialized\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ if (ae_dev->ops->handle_hw_ras_error)
+ ret = ae_dev->ops->handle_hw_ras_error(ae_dev);
+ else
+ return PCI_ERS_RESULT_NONE;
+
+ return ret;
+}
+
+static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ const struct hnae3_ae_ops *ops;
+ enum hnae3_reset_type reset_type;
+ struct device *dev = &pdev->dev;
+
+ if (!ae_dev || !ae_dev->ops)
+ return PCI_ERS_RESULT_NONE;
+
+ ops = ae_dev->ops;
+ /* request the reset */
+ if (ops->reset_event && ops->get_reset_level &&
+ ops->set_default_reset_request) {
+ if (ae_dev->hw_err_reset_req) {
+ reset_type = ops->get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ ops->set_default_reset_request(ae_dev, reset_type);
+ dev_info(dev, "requesting reset due to PCI error\n");
+ ops->reset_event(pdev, NULL);
+ }
+
+ return PCI_ERS_RESULT_RECOVERED;
+ }
+
+ return PCI_ERS_RESULT_DISCONNECT;
+}
+
+static void hns3_reset_prepare(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "FLR prepare\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare)
+ ae_dev->ops->flr_prepare(ae_dev);
+}
+
+static void hns3_reset_done(struct pci_dev *pdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "FLR done\n");
+ if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done)
+ ae_dev->ops->flr_done(ae_dev);
+}
+
+static const struct pci_error_handlers hns3_err_handler = {
+ .error_detected = hns3_error_detected,
+ .slot_reset = hns3_slot_reset,
+ .reset_prepare = hns3_reset_prepare,
+ .reset_done = hns3_reset_done,
+};
+
+static struct pci_driver hns3_driver = {
+ .name = hns3_driver_name,
+ .id_table = hns3_pci_tbl,
+ .probe = hns3_probe,
+ .remove = hns3_remove,
+ .shutdown = hns3_shutdown,
+ .sriov_configure = hns3_pci_sriov_configure,
+ .err_handler = &hns3_err_handler,
+};
+
+ /* set default features for hns3 */
+static void hns3_set_default_feature(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct pci_dev *pdev = h->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST;
+
+ netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
+
+ netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
+
+ netdev->vlan_features |=
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
+ NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
+
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
+ NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
+ NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
+ NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC |
+ NETIF_F_FRAGLIST;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ netdev->hw_features |= NETIF_F_GRO_HW;
+ netdev->features |= NETIF_F_GRO_HW;
+
+ if (!(h->flags & HNAE3_SUPPORT_VF)) {
+ netdev->hw_features |= NETIF_F_NTUPLE;
+ netdev->features |= NETIF_F_NTUPLE;
+ }
+ }
+
+ if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) {
+ netdev->hw_features |= NETIF_F_GSO_UDP_L4;
+ netdev->features |= NETIF_F_GSO_UDP_L4;
+ netdev->vlan_features |= NETIF_F_GSO_UDP_L4;
+ netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4;
+ }
+}
+
+static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ unsigned int order = hns3_page_order(ring);
+ struct page *p;
+
+ p = dev_alloc_pages(order);
+ if (!p)
+ return -ENOMEM;
+
+ cb->priv = p;
+ cb->page_offset = 0;
+ cb->reuse_flag = 0;
+ cb->buf = page_address(p);
+ cb->length = hns3_page_size(ring);
+ cb->type = DESC_TYPE_PAGE;
+ page_ref_add(p, USHRT_MAX - 1);
+ cb->pagecnt_bias = USHRT_MAX;
+
+ return 0;
+}
+
+static void hns3_free_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb, int budget)
+{
+ if (cb->type == DESC_TYPE_SKB)
+ napi_consume_skb(cb->priv, budget);
+ else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias)
+ __page_frag_cache_drain(cb->priv, cb->pagecnt_bias);
+ memset(cb, 0, sizeof(*cb));
+}
+
+static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
+{
+ cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
+ cb->length, ring_to_dma_dir(ring));
+
+ if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma)))
+ return -EIO;
+
+ return 0;
+}
+
+static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ if (cb->type == DESC_TYPE_SKB || cb->type == DESC_TYPE_FRAGLIST_SKB)
+ dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+ else if (cb->length)
+ dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
+ ring_to_dma_dir(ring));
+}
+
+static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
+{
+ hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc[i].addr = 0;
+ ring->desc_cb[i].refill = 0;
+}
+
+static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i,
+ int budget)
+{
+ struct hns3_desc_cb *cb = &ring->desc_cb[i];
+
+ if (!ring->desc_cb[i].dma)
+ return;
+
+ hns3_buffer_detach(ring, i);
+ hns3_free_buffer(ring, cb, budget);
+}
+
+static void hns3_free_buffers(struct hns3_enet_ring *ring)
+{
+ int i;
+
+ for (i = 0; i < ring->desc_num; i++)
+ hns3_free_buffer_detach(ring, i, 0);
+}
+
+/* free desc along with its attached buffer */
+static void hns3_free_desc(struct hns3_enet_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ hns3_free_buffers(ring);
+
+ if (ring->desc) {
+ dma_free_coherent(ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
+}
+
+static int hns3_alloc_desc(struct hns3_enet_ring *ring)
+{
+ int size = ring->desc_num * sizeof(ring->desc[0]);
+
+ ring->desc = dma_alloc_coherent(ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring,
+ struct hns3_desc_cb *cb)
+{
+ int ret;
+
+ ret = hns3_alloc_buffer(ring, cb);
+ if (ret)
+ goto out;
+
+ ret = hns3_map_buffer(ring, cb);
+ if (ret)
+ goto out_with_buf;
+
+ return 0;
+
+out_with_buf:
+ hns3_free_buffer(ring, cb, 0);
+out:
+ return ret;
+}
+
+static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i)
+{
+ int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]);
+
+ if (ret)
+ return ret;
+
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc_cb[i].refill = 1;
+
+ return 0;
+}
+
+ /* Allocate memory for the raw packet and map it for DMA */
+static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
+{
+ int i, j, ret;
+
+ for (i = 0; i < ring->desc_num; i++) {
+ ret = hns3_alloc_and_attach_buffer(ring, i);
+ if (ret)
+ goto out_buffer_fail;
+ }
+
+ return 0;
+
+out_buffer_fail:
+ for (j = i - 1; j >= 0; j--)
+ hns3_free_buffer_detach(ring, j, 0);
+ return ret;
+}
+
+ /* detach an in-use buffer and replace it with a reserved one */
+static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
+ struct hns3_desc_cb *res_cb)
+{
+ hns3_unmap_buffer(ring, &ring->desc_cb[i]);
+ ring->desc_cb[i] = *res_cb;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
+ ring->desc_cb[i].refill = 1;
+ ring->desc[i].rx.bd_base_info = 0;
+}
+
+static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
+{
+ ring->desc_cb[i].reuse_flag = 0;
+ ring->desc_cb[i].refill = 1;
+ ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma +
+ ring->desc_cb[i].page_offset);
+ ring->desc[i].rx.bd_base_info = 0;
+
+ dma_sync_single_for_device(ring_to_dev(ring),
+ ring->desc_cb[i].dma + ring->desc_cb[i].page_offset,
+ hns3_buf_size(ring),
+ DMA_FROM_DEVICE);
+}
+
+static bool hns3_nic_reclaim_desc(struct hns3_enet_ring *ring,
+ int *bytes, int *pkts, int budget)
+{
+ /* Pairs with the ring->last_to_use update in hns3_tx_doorbell();
+ * smp_store_release() is not used in hns3_tx_doorbell() because
+ * the doorbell operation already provides the needed barrier.
+ */
+ int ltu = smp_load_acquire(&ring->last_to_use);
+ int ntc = ring->next_to_clean;
+ struct hns3_desc_cb *desc_cb;
+ bool reclaimed = false;
+ struct hns3_desc *desc;
+
+ while (ltu != ntc) {
+ desc = &ring->desc[ntc];
+
+ if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) &
+ BIT(HNS3_TXD_VLD_B))
+ break;
+
+ desc_cb = &ring->desc_cb[ntc];
+ (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
+ (*bytes) += desc_cb->length;
+ /* desc_cb will be cleaned by hns3_free_buffer_detach */
+ hns3_free_buffer_detach(ring, ntc, budget);
+
+ if (++ntc == ring->desc_num)
+ ntc = 0;
+
+ /* Issue prefetch for next Tx descriptor */
+ prefetch(&ring->desc_cb[ntc]);
+ reclaimed = true;
+ }
+
+ if (unlikely(!reclaimed))
+ return false;
+
+ /* This smp_store_release() pairs with smp_load_acquire() in
+ * ring_space called by hns3_nic_net_xmit.
+ */
+ smp_store_release(&ring->next_to_clean, ntc);
+ return true;
+}
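+
+ /* Illustrative userspace-style sketch (not part of this driver) of the
+ * acquire/release pairing used above, with C11 atomics standing in for
+ * the driver's doorbell barrier and smp_load_acquire(): the producer
+ * publishes last_to_use with release semantics and the consumer reads it
+ * with acquire semantics before walking descriptors up to that index.
+ */
+ #include <stdatomic.h>
+
+ static _Atomic int sketch_last_to_use;
+
+ static void sketch_producer_publish(int next_to_use)
+ {
+ /* every descriptor write before this store is visible to the consumer */
+ atomic_store_explicit(&sketch_last_to_use, next_to_use,
+ memory_order_release);
+ }
+
+ static int sketch_consumer_snapshot(void)
+ {
+ /* pairs with the release store in sketch_producer_publish() */
+ return atomic_load_explicit(&sketch_last_to_use, memory_order_acquire);
+ }
+
+ int main(void)
+ {
+ sketch_producer_publish(8);
+ return sketch_consumer_snapshot() == 8 ? 0 : 1;
+ }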
+
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct netdev_queue *dev_queue;
+ int bytes, pkts;
+
+ bytes = 0;
+ pkts = 0;
+
+ if (unlikely(!hns3_nic_reclaim_desc(ring, &bytes, &pkts, budget)))
+ return;
+
+ ring->tqp_vector->tx_group.total_bytes += bytes;
+ ring->tqp_vector->tx_group.total_packets += pkts;
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.tx_bytes += bytes;
+ ring->stats.tx_pkts += pkts;
+ u64_stats_update_end(&ring->syncp);
+
+ dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
+ netdev_tx_completed_queue(dev_queue, pkts, bytes);
+
+ if (unlikely(netif_carrier_ok(netdev) &&
+ ring_space(ring) > HNS3_MAX_TSO_BD_NUM)) {
+ /* Make sure that anybody stopping the queue after this
+ * sees the new next_to_clean.
+ */
+ smp_mb();
+ if (netif_tx_queue_stopped(dev_queue) &&
+ !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) {
+ netif_tx_wake_queue(dev_queue);
+ ring->stats.restart_queue++;
+ }
+ }
+}
+
+static int hns3_desc_unused(struct hns3_enet_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+
+ if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill))
+ return ring->desc_num;
+
+ return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
+}
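+
+ /* Illustrative sketch (not part of this driver) of the free-slot
+ * arithmetic above: the number of unused descriptors is ntc - ntu,
+ * wrapped by desc_num when next_to_use has run ahead of next_to_clean.
+ */
+ #include <assert.h>
+
+ static int sketch_ring_unused(int ntc, int ntu, int desc_num)
+ {
+ return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
+ }
+
+ int main(void)
+ {
+ assert(sketch_ring_unused(5, 2, 8) == 3); /* no wrap: 5 - 2 */
+ assert(sketch_ring_unused(1, 6, 8) == 3); /* wrapped: 8 + 1 - 6 */
+ return 0;
+ }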
+
+/* Return true if there is any allocation failure */
+static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
+ int cleand_count)
+{
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc_cb res_cbs;
+ int i, ret;
+
+ for (i = 0; i < cleand_count; i++) {
+ desc_cb = &ring->desc_cb[ring->next_to_use];
+ if (desc_cb->reuse_flag) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.reuse_pg_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ hns3_reuse_buffer(ring, ring->next_to_use);
+ } else {
+ ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
+ if (ret) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ hns3_rl_err(ring_to_netdev(ring),
+ "alloc rx buffer failed: %d\n",
+ ret);
+
+ writel(i, ring->tqp->io_base +
+ HNS3_RING_RX_RING_HEAD_REG);
+ return true;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.non_reuse_pg++;
+ u64_stats_update_end(&ring->syncp);
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
+ return false;
+}
+
+static bool hns3_page_is_reusable(struct page *page)
+{
+ return page_to_nid(page) == numa_mem_id() &&
+ !page_is_pfmemalloc(page);
+}
+
+static bool hns3_can_reuse_page(struct hns3_desc_cb *cb)
+{
+ return (page_count(cb->priv) - cb->pagecnt_bias) == 1;
+}
+
+static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
+ struct hns3_enet_ring *ring, int pull_len,
+ struct hns3_desc_cb *desc_cb)
+{
+ struct hns3_desc *desc = &ring->desc[ring->next_to_clean];
+ int size = le16_to_cpu(desc->rx.size);
+ u32 truesize = hns3_buf_size(ring);
+
+ desc_cb->pagecnt_bias--;
+ skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
+ size - pull_len, truesize);
+
+ /* Avoid reusing a remote page, or a page the stack is still using
+ * when page_offset has rolled back to zero; in those cases flag the
+ * buffer as not reusable.
+ */
+ if (unlikely(!hns3_page_is_reusable(desc_cb->priv)) ||
+ (!desc_cb->page_offset && !hns3_can_reuse_page(desc_cb))) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ return;
+ }
+
+ /* Move the offset past the buffer just handed to the stack */
+ desc_cb->page_offset += truesize;
+
+ if (desc_cb->page_offset + truesize <= hns3_page_size(ring)) {
+ desc_cb->reuse_flag = 1;
+ } else if (hns3_can_reuse_page(desc_cb)) {
+ desc_cb->reuse_flag = 1;
+ desc_cb->page_offset = 0;
+ } else if (desc_cb->pagecnt_bias) {
+ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias);
+ return;
+ }
+
+ if (unlikely(!desc_cb->pagecnt_bias)) {
+ page_ref_add(desc_cb->priv, USHRT_MAX);
+ desc_cb->pagecnt_bias = USHRT_MAX;
+ }
+}
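+
+ /* Illustrative sketch (not part of this driver) of the page-reuse offset
+ * arithmetic above: after handing a truesize-byte chunk to the stack, the
+ * buffer is reused in place if another chunk still fits in the page, or
+ * flipped back to offset 0 once the stack holds no extra references
+ * (modelled here as refcount - bias == 1); the struct below is a
+ * hypothetical stand-in for hns3_desc_cb.
+ */
+ #include <stdbool.h>
+
+ struct sketch_rx_buf {
+ unsigned int page_size;
+ unsigned int page_offset;
+ unsigned int truesize;
+ unsigned int refcount; /* stand-in for page_count() */
+ unsigned int bias; /* stand-in for pagecnt_bias */
+ };
+
+ static bool sketch_try_reuse(struct sketch_rx_buf *b)
+ {
+ b->bias--; /* one reference handed to the stack */
+ b->page_offset += b->truesize;
+
+ if (b->page_offset + b->truesize <= b->page_size)
+ return true; /* the next chunk still fits */
+
+ if (b->refcount - b->bias == 1) {
+ b->page_offset = 0; /* only we hold the page, wrap around */
+ return true;
+ }
+
+ return false; /* the stack still uses the page, give it up */
+ }
+
+ int main(void)
+ {
+ struct sketch_rx_buf b = { 4096, 0, 2048, 65535, 65535 };
+
+ return sketch_try_reuse(&b) ? 0 : 1; /* 0 + 2048 + 2048 <= 4096: reuse */
+ }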
+
+static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
+{
+ __be16 type = skb->protocol;
+ struct tcphdr *th;
+ int depth = 0;
+
+ while (eth_type_vlan(type)) {
+ struct vlan_hdr *vh;
+
+ if ((depth + VLAN_HLEN) > skb_headlen(skb))
+ return -EFAULT;
+
+ vh = (struct vlan_hdr *)(skb->data + depth);
+ type = vh->h_vlan_encapsulated_proto;
+ depth += VLAN_HLEN;
+ }
+
+ skb_set_network_header(skb, depth);
+
+ if (type == htons(ETH_P_IP)) {
+ const struct iphdr *iph = ip_hdr(skb);
+
+ depth += sizeof(struct iphdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v4_check(skb->len - depth, iph->saddr,
+ iph->daddr, 0);
+ } else if (type == htons(ETH_P_IPV6)) {
+ const struct ipv6hdr *iph = ipv6_hdr(skb);
+
+ depth += sizeof(struct ipv6hdr);
+ skb_set_transport_header(skb, depth);
+ th = tcp_hdr(skb);
+ th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr,
+ &iph->daddr, 0);
+ } else {
+ hns3_rl_err(skb->dev,
+ "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n",
+ be16_to_cpu(type), depth);
+ return -EFAULT;
+ }
+
+ skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
+ if (th->cwr)
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
+
+ if (l234info & BIT(HNS3_RXD_GRO_FIXID_B))
+ skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
+
+ skb->csum_start = (unsigned char *)th - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
+ trace_hns3_gro(skb);
+
+ return 0;
+}
+
+static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
+ u32 l234info, u32 bd_base_info, u32 ol_info)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ int l3_type, l4_type;
+ int ol4_type;
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ skb_checksum_none_assert(skb);
+
+ if (!(netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* check if hardware has done checksum */
+ if (!(bd_base_info & BIT(HNS3_RXD_L3L4P_B)))
+ return;
+
+ if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
+ BIT(HNS3_RXD_OL3E_B) |
+ BIT(HNS3_RXD_OL4E_B)))) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.l3l4_csum_err++;
+ u64_stats_update_end(&ring->syncp);
+
+ return;
+ }
+
+ ol4_type = hnae3_get_field(ol_info, HNS3_RXD_OL4ID_M,
+ HNS3_RXD_OL4ID_S);
+ switch (ol4_type) {
+ case HNS3_OL4_TYPE_MAC_IN_UDP:
+ case HNS3_OL4_TYPE_NVGRE:
+ skb->csum_level = 1;
+ fallthrough;
+ case HNS3_OL4_TYPE_NO_TUN:
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
+ HNS3_RXD_L3ID_S);
+ l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M,
+ HNS3_RXD_L4ID_S);
+
+ /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
+ if ((l3_type == HNS3_L3_TYPE_IPV4 ||
+ l3_type == HNS3_L3_TYPE_IPV6) &&
+ (l4_type == HNS3_L4_TYPE_UDP ||
+ l4_type == HNS3_L4_TYPE_TCP ||
+ l4_type == HNS3_L4_TYPE_SCTP))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ break;
+ default:
+ break;
+ }
+}
+
+static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+ if (skb_has_frag_list(skb))
+ napi_gro_flush(&ring->tqp_vector->napi, false);
+
+ napi_gro_receive(&ring->tqp_vector->napi, skb);
+}
+
+static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
+ struct hns3_desc *desc, u32 l234info,
+ u16 *vlan_tag)
+{
+ struct hnae3_handle *handle = ring->tqp->handle;
+ struct pci_dev *pdev = ring->tqp->handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+
+ if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) {
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ if (!(*vlan_tag & VLAN_VID_MASK))
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return (*vlan_tag != 0);
+ }
+
+#define HNS3_STRP_OUTER_VLAN 0x1
+#define HNS3_STRP_INNER_VLAN 0x2
+#define HNS3_STRP_BOTH 0x3
+
+ /* Hardware always inserts the VLAN tag into the RX descriptor when
+ * it strips the tag from the packet, so the driver needs to
+ * determine which tag to report to the stack.
+ */
+ switch (hnae3_get_field(l234info, HNS3_RXD_STRP_TAGP_M,
+ HNS3_RXD_STRP_TAGP_S)) {
+ case HNS3_STRP_OUTER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ return true;
+ case HNS3_STRP_INNER_VLAN:
+ if (handle->port_base_vlan_state !=
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ return false;
+
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+ return true;
+ case HNS3_STRP_BOTH:
+ if (handle->port_base_vlan_state ==
+ HNAE3_PORT_BASE_VLAN_DISABLE)
+ *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
+ else
+ *vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
+static void hns3_rx_ring_move_fw(struct hns3_enet_ring *ring)
+{
+ ring->desc[ring->next_to_clean].rx.bd_base_info &=
+ cpu_to_le32(~BIT(HNS3_RXD_VLD_B));
+ ring->desc_cb[ring->next_to_clean].refill = 0;
+ ring->next_to_clean += 1;
+
+ if (unlikely(ring->next_to_clean == ring->desc_num))
+ ring->next_to_clean = 0;
+}
+
+static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
+ unsigned char *va)
+{
+ struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
+ struct net_device *netdev = ring_to_netdev(ring);
+ struct sk_buff *skb;
+
+ ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
+ skb = ring->skb;
+ if (unlikely(!skb)) {
+ hns3_rl_err(netdev, "alloc rx skb fail\n");
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -ENOMEM;
+ }
+
+ trace_hns3_rx_desc(ring);
+ prefetchw(skb->data);
+
+ ring->pending_buf = 1;
+ ring->frag_num = 0;
+ ring->tail_skb = NULL;
+ if (length <= HNS3_RX_HEAD_SIZE) {
+ memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
+
+ /* We can reuse buffer as-is, just make sure it is local */
+ if (likely(hns3_page_is_reusable(desc_cb->priv)))
+ desc_cb->reuse_flag = 1;
+ else /* This page cannot be reused so discard it */
+ __page_frag_cache_drain(desc_cb->priv,
+ desc_cb->pagecnt_bias);
+
+ hns3_rx_ring_move_fw(ring);
+ return 0;
+ }
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.seg_pkt_cnt++;
+ u64_stats_update_end(&ring->syncp);
+
+ ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
+ __skb_put(skb, ring->pull_len);
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
+ desc_cb);
+ hns3_rx_ring_move_fw(ring);
+
+ return 0;
+}
+
+static int hns3_add_frag(struct hns3_enet_ring *ring)
+{
+ struct sk_buff *skb = ring->skb;
+ struct sk_buff *head_skb = skb;
+ struct sk_buff *new_skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *desc;
+ u32 bd_base_info;
+
+ do {
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ /* make sure the HW has finished writing the descriptor */
+ dma_rmb();
+ if (!(bd_base_info & BIT(HNS3_RXD_VLD_B)))
+ return -ENXIO;
+
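+ /* Once the frag array of the current skb is full, chain further
+ * buffers via a zero-length skb linked on the head skb's frag_list.
+ */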
+ if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
+ new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
+ if (unlikely(!new_skb)) {
+ hns3_rl_err(ring_to_netdev(ring),
+ "alloc rx fraglist skb fail\n");
+ return -ENXIO;
+ }
+ ring->frag_num = 0;
+
+ if (ring->tail_skb) {
+ ring->tail_skb->next = new_skb;
+ ring->tail_skb = new_skb;
+ } else {
+ skb_shinfo(skb)->frag_list = new_skb;
+ ring->tail_skb = new_skb;
+ }
+ }
+
+ if (ring->tail_skb) {
+ head_skb->truesize += hns3_buf_size(ring);
+ head_skb->data_len += le16_to_cpu(desc->rx.size);
+ head_skb->len += le16_to_cpu(desc->rx.size);
+ skb = ring->tail_skb;
+ }
+
+ dma_sync_single_for_cpu(ring_to_dev(ring),
+ desc_cb->dma + desc_cb->page_offset,
+ hns3_buf_size(ring),
+ DMA_FROM_DEVICE);
+
+ hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
+ trace_hns3_rx_desc(ring);
+ hns3_rx_ring_move_fw(ring);
+ ring->pending_buf++;
+ } while (!(bd_base_info & BIT(HNS3_RXD_FE_B)));
+
+ return 0;
+}
+
+static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 l234info,
+ u32 bd_base_info, u32 ol_info)
+{
+ u32 l3_type;
+
+ skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
+ HNS3_RXD_GRO_SIZE_M,
+ HNS3_RXD_GRO_SIZE_S);
+ /* if there is no HW GRO, do not set gro params */
+ if (!skb_shinfo(skb)->gso_size) {
+ hns3_rx_checksum(ring, skb, l234info, bd_base_info, ol_info);
+ return 0;
+ }
+
+ NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info,
+ HNS3_RXD_GRO_COUNT_M,
+ HNS3_RXD_GRO_COUNT_S);
+
+ l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S);
+ if (l3_type == HNS3_L3_TYPE_IPV4)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+ else if (l3_type == HNS3_L3_TYPE_IPV6)
+ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+ else
+ return -EFAULT;
+
+ return hns3_gro_complete(skb, l234info);
+}
+
+static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
+ struct sk_buff *skb, u32 rss_hash)
+{
+ struct hnae3_handle *handle = ring->tqp->handle;
+ enum pkt_hash_types rss_type;
+
+ if (rss_hash)
+ rss_type = handle->kinfo.rss_type;
+ else
+ rss_type = PKT_HASH_TYPE_NONE;
+
+ skb_set_hash(skb, rss_hash, rss_type);
+}
+
+static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
+{
+ struct net_device *netdev = ring_to_netdev(ring);
+ enum hns3_pkt_l2t_type l2_frame_type;
+ u32 bd_base_info, l234info, ol_info;
+ struct hns3_desc *desc;
+ unsigned int len;
+ int pre_ntc, ret;
+
+ /* The bdinfo handled below is only valid on the last BD of the
+ * current packet, and ring->next_to_clean already points to the
+ * first descriptor of the next packet, hence the - 1 below.
+ */
+ pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) :
+ (ring->desc_num - 1);
+ desc = &ring->desc[pre_ntc];
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+ l234info = le32_to_cpu(desc->rx.l234_info);
+ ol_info = le32_to_cpu(desc->rx.ol_info);
+
+ /* Based on the hardware strategy, the offloaded tag is stored in
+ * ot_vlan_tag in the two-layer tag case, and in vlan_tag in the
+ * one-layer tag case.
+ */
+ if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ u16 vlan_tag;
+
+ if (hns3_parse_vlan_tag(ring, desc, l234info, &vlan_tag))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ vlan_tag);
+ }
+
+ if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) |
+ BIT(HNS3_RXD_L2E_B))))) {
+ u64_stats_update_begin(&ring->syncp);
+ if (l234info & BIT(HNS3_RXD_L2E_B))
+ ring->stats.l2_err++;
+ else
+ ring->stats.err_pkt_len++;
+ u64_stats_update_end(&ring->syncp);
+
+ return -EFAULT;
+ }
+
+ len = skb->len;
+
+ /* Resolve skb->protocol before handing the skb up to the IP stack */
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* This is needed in order to enable forwarding support */
+ ret = hns3_set_gro_and_checksum(ring, skb, l234info,
+ bd_base_info, ol_info);
+ if (unlikely(ret)) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ return ret;
+ }
+
+ l2_frame_type = hnae3_get_field(l234info, HNS3_RXD_DMAC_M,
+ HNS3_RXD_DMAC_S);
+
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.rx_pkts++;
+ ring->stats.rx_bytes += len;
+
+ if (l2_frame_type == HNS3_L2_TYPE_MULTICAST)
+ ring->stats.rx_multicast++;
+
+ u64_stats_update_end(&ring->syncp);
+
+ ring->tqp_vector->rx_group.total_bytes += len;
+
+ hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash));
+ return 0;
+}
+
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
+{
+ struct sk_buff *skb = ring->skb;
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_desc *desc;
+ unsigned int length;
+ u32 bd_base_info;
+ int ret;
+
+ desc = &ring->desc[ring->next_to_clean];
+ desc_cb = &ring->desc_cb[ring->next_to_clean];
+
+ prefetch(desc);
+
+ if (!skb) {
+ bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
+
+ /* Check valid BD */
+ if (unlikely(!(bd_base_info & BIT(HNS3_RXD_VLD_B))))
+ return -ENXIO;
+
+ dma_rmb();
+ length = le16_to_cpu(desc->rx.size);
+
+ ring->va = desc_cb->buf + desc_cb->page_offset;
+
+ dma_sync_single_for_cpu(ring_to_dev(ring),
+ desc_cb->dma + desc_cb->page_offset,
+ hns3_buf_size(ring),
+ DMA_FROM_DEVICE);
+
+ /* Prefetch the first cache line of the first page.
+ * The idea is to cache a few bytes of the packet header.
+ * With a 64B L1 cache line we need to prefetch twice to cover
+ * 128B, but on parts with 128B L1 cache lines a single fetch is
+ * enough to bring in the relevant part of the header.
+ */
+ net_prefetch(ring->va);
+
+ ret = hns3_alloc_skb(ring, length, ring->va);
+ skb = ring->skb;
+
+ if (ret < 0) /* alloc buffer fail */
+ return ret;
+ if (!(bd_base_info & BIT(HNS3_RXD_FE_B))) { /* need add frag */
+ ret = hns3_add_frag(ring);
+ if (ret)
+ return ret;
+ }
+ } else {
+ ret = hns3_add_frag(ring);
+ if (ret)
+ return ret;
+ }
+
+ /* As the head data may be changed when GRO is enabled, copy the
+ * head data in after the rest of the packet has been received.
+ */
+ if (skb->len > HNS3_RX_HEAD_SIZE)
+ memcpy(skb->data, ring->va,
+ ALIGN(ring->pull_len, sizeof(long)));
+
+ ret = hns3_handle_bdinfo(ring, skb);
+ if (unlikely(ret)) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ skb_record_rx_queue(skb, ring->tqp->tqp_index);
+ return 0;
+}
+
+int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
+{
+#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
+ int unused_count = hns3_desc_unused(ring);
+ bool failure = false;
+ int recv_pkts = 0;
+ int err;
+
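+ /* Descriptors consumed by the skb currently being assembled are
+ * not counted as refillable until that packet is completed.
+ */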
+ unused_count -= ring->pending_buf;
+
+ while (recv_pkts < budget) {
+ /* Reuse or realloc buffers */
+ if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
+ failure = failure ||
+ hns3_nic_alloc_rx_buffers(ring, unused_count);
+ unused_count = 0;
+ }
+
+ /* Poll one pkt */
+ err = hns3_handle_rx_bd(ring);
+ /* No frame-end seen for the packet yet, or skb allocation failed */
+ if (unlikely(!ring->skb || err == -ENXIO)) {
+ goto out;
+ } else if (likely(!err)) {
+ rx_fn(ring, ring->skb);
+ recv_pkts++;
+ }
+
+ unused_count += ring->pending_buf;
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
+out:
+ return failure ? budget : recv_pkts;
+}
+
+static bool hns3_get_new_flow_lvl(struct hns3_enet_ring_group *ring_group)
+{
+#define HNS3_RX_LOW_BYTE_RATE 10000
+#define HNS3_RX_MID_BYTE_RATE 20000
+#define HNS3_RX_ULTRA_PACKET_RATE 40
+
+ enum hns3_flow_level_range new_flow_level;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int packets_per_msecs, bytes_per_msecs;
+ u32 time_passed_ms;
+
+ tqp_vector = ring_group->ring->tqp_vector;
+ time_passed_ms =
+ jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);
+ if (!time_passed_ms)
+ return false;
+
+ do_div(ring_group->total_packets, time_passed_ms);
+ packets_per_msecs = ring_group->total_packets;
+
+ do_div(ring_group->total_bytes, time_passed_ms);
+ bytes_per_msecs = ring_group->total_bytes;
+
+ new_flow_level = ring_group->coal.flow_level;
+
+ /* Simple throttle rate management
+ * 0-10MB/s lower (50000 ints/s)
+ * 10-20MB/s middle (20000 ints/s)
+ * 20-1249MB/s high (18000 ints/s)
+ * > 40000pps ultra (8000 ints/s)
+ */
+ switch (new_flow_level) {
+ case HNS3_FLOW_LOW:
+ if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
+ new_flow_level = HNS3_FLOW_MID;
+ break;
+ case HNS3_FLOW_MID:
+ if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
+ new_flow_level = HNS3_FLOW_HIGH;
+ else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
+ new_flow_level = HNS3_FLOW_LOW;
+ break;
+ case HNS3_FLOW_HIGH:
+ case HNS3_FLOW_ULTRA:
+ default:
+ if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
+ new_flow_level = HNS3_FLOW_MID;
+ break;
+ }
+
+ if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
+ &tqp_vector->rx_group == ring_group)
+ new_flow_level = HNS3_FLOW_ULTRA;
+
+ ring_group->total_bytes = 0;
+ ring_group->total_packets = 0;
+ ring_group->coal.flow_level = new_flow_level;
+
+ return true;
+}
+
+static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
+{
+ struct hns3_enet_tqp_vector *tqp_vector;
+ u16 new_int_gl;
+
+ if (!ring_group->ring)
+ return false;
+
+ tqp_vector = ring_group->ring->tqp_vector;
+ if (!tqp_vector->last_jiffies)
+ return false;
+
+ if (ring_group->total_packets == 0) {
+ ring_group->coal.int_gl = HNS3_INT_GL_50K;
+ ring_group->coal.flow_level = HNS3_FLOW_LOW;
+ return true;
+ }
+
+ if (!hns3_get_new_flow_lvl(ring_group))
+ return false;
+
+ new_int_gl = ring_group->coal.int_gl;
+ switch (ring_group->coal.flow_level) {
+ case HNS3_FLOW_LOW:
+ new_int_gl = HNS3_INT_GL_50K;
+ break;
+ case HNS3_FLOW_MID:
+ new_int_gl = HNS3_INT_GL_20K;
+ break;
+ case HNS3_FLOW_HIGH:
+ new_int_gl = HNS3_INT_GL_18K;
+ break;
+ case HNS3_FLOW_ULTRA:
+ new_int_gl = HNS3_INT_GL_8K;
+ break;
+ default:
+ break;
+ }
+
+ if (new_int_gl != ring_group->coal.int_gl) {
+ ring_group->coal.int_gl = new_int_gl;
+ return true;
+ }
+ return false;
+}
+
+static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
+{
+ struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
+ struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
+ bool rx_update, tx_update;
+
+ /* update param every 1000ms */
+ if (time_before(jiffies,
+ tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
+ return;
+
+ if (rx_group->coal.gl_adapt_enable) {
+ rx_update = hns3_get_new_int_gl(rx_group);
+ if (rx_update)
+ hns3_set_vector_coalesce_rx_gl(tqp_vector,
+ rx_group->coal.int_gl);
+ }
+
+ if (tx_group->coal.gl_adapt_enable) {
+ tx_update = hns3_get_new_int_gl(tx_group);
+ if (tx_update)
+ hns3_set_vector_coalesce_tx_gl(tqp_vector,
+ tx_group->coal.int_gl);
+ }
+
+ tqp_vector->last_jiffies = jiffies;
+}
+
+static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
+{
+ struct hns3_nic_priv *priv = netdev_priv(napi->dev);
+ struct hns3_enet_ring *ring;
+ int rx_pkt_total = 0;
+
+ struct hns3_enet_tqp_vector *tqp_vector =
+ container_of(napi, struct hns3_enet_tqp_vector, napi);
+ bool clean_complete = true;
+ int rx_budget = budget;
+
+ if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ napi_complete(napi);
+ return 0;
+ }
+
+ /* Since the actual Tx work is minimal, we can give the Tx a larger
+ * budget and be more aggressive about cleaning up the Tx descriptors.
+ */
+ hns3_for_each_ring(ring, tqp_vector->tx_group)
+ hns3_clean_tx_ring(ring, budget);
+
+ /* make sure the rx ring budget is not smaller than 1 */
+ if (tqp_vector->num_tqps > 1)
+ rx_budget = max(budget / tqp_vector->num_tqps, 1);
+
+ hns3_for_each_ring(ring, tqp_vector->rx_group) {
+ int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
+ hns3_rx_skb);
+
+ if (rx_cleaned >= rx_budget)
+ clean_complete = false;
+
+ rx_pkt_total += rx_cleaned;
+ }
+
+ tqp_vector->rx_group.total_packets += rx_pkt_total;
+
+ if (!clean_complete)
+ return budget;
+
+ if (napi_complete(napi) &&
+ likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
+ hns3_update_new_int_gl(tqp_vector);
+ hns3_mask_vector_irq(tqp_vector, 1);
+ }
+
+ return rx_pkt_total;
+}
+
+static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node *head)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *cur_chain = head;
+ struct hnae3_ring_chain_node *chain;
+ struct hns3_enet_ring *tx_ring;
+ struct hns3_enet_ring *rx_ring;
+
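+ /* The caller provides the head node; chain nodes for any additional
+ * rings are devm-allocated and freed once the chain is no longer
+ * needed.
+ */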
+ tx_ring = tqp_vector->tx_group.ring;
+ if (tx_ring) {
+ cur_chain->tqp_index = tx_ring->tqp->tqp_index;
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
+
+ cur_chain->next = NULL;
+
+ while (tx_ring->next) {
+ tx_ring = tx_ring->next;
+
+ chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
+ GFP_KERNEL);
+ if (!chain)
+ goto err_free_chain;
+
+ cur_chain->next = chain;
+ chain->tqp_index = tx_ring->tqp->tqp_index;
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_TX);
+ hnae3_set_field(chain->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ HNAE3_RING_GL_TX);
+
+ cur_chain = chain;
+ }
+ }
+
+ rx_ring = tqp_vector->rx_group.ring;
+ if (!tx_ring && rx_ring) {
+ cur_chain->next = NULL;
+ cur_chain->tqp_index = rx_ring->tqp->tqp_index;
+ hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
+ rx_ring = rx_ring->next;
+ }
+
+ while (rx_ring) {
+ chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
+ if (!chain)
+ goto err_free_chain;
+
+ cur_chain->next = chain;
+ chain->tqp_index = rx_ring->tqp->tqp_index;
+ hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
+ HNAE3_RING_TYPE_RX);
+ hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+
+ cur_chain = chain;
+
+ rx_ring = rx_ring->next;
+ }
+
+ return 0;
+
+err_free_chain:
+ cur_chain = head->next;
+ while (cur_chain) {
+ chain = cur_chain->next;
+ devm_kfree(&pdev->dev, cur_chain);
+ cur_chain = chain;
+ }
+ head->next = NULL;
+
+ return -ENOMEM;
+}
+
+static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+ struct hnae3_ring_chain_node *head)
+{
+ struct pci_dev *pdev = tqp_vector->handle->pdev;
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ devm_kfree(&pdev->dev, chain);
+ chain = chain_tmp;
+ }
+}
+
+static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
+ struct hns3_enet_ring *ring)
+{
+ ring->next = group->ring;
+ group->ring = ring;
+
+ group->count++;
+}
+
+static void hns3_nic_set_cpumask(struct hns3_nic_priv *priv)
+{
+ struct pci_dev *pdev = priv->ae_handle->pdev;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int num_vectors = priv->vector_num;
+ int numa_node;
+ int vector_i;
+
+ numa_node = dev_to_node(&pdev->dev);
+
+ for (vector_i = 0; vector_i < num_vectors; vector_i++) {
+ tqp_vector = &priv->tqp_vector[vector_i];
+ cpumask_set_cpu(cpumask_local_spread(vector_i, numa_node),
+ &tqp_vector->affinity_mask);
+ }
+}
+
+static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int ret;
+ int i;
+
+ hns3_nic_set_cpumask(priv);
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ hns3_vector_gl_rl_init_hw(tqp_vector, priv);
+ tqp_vector->num_tqps = 0;
+ }
+
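+ /* Distribute the TQPs over the vectors round-robin: TX ring i and
+ * RX ring (i + tqp_num) share vector (i % vector_num).
+ */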
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ u16 vector_i = i % priv->vector_num;
+ u16 tqp_num = h->kinfo.num_tqps;
+
+ tqp_vector = &priv->tqp_vector[vector_i];
+
+ hns3_add_ring_to_group(&tqp_vector->tx_group,
+ &priv->ring[i]);
+
+ hns3_add_ring_to_group(&tqp_vector->rx_group,
+ &priv->ring[i + tqp_num]);
+
+ priv->ring[i].tqp_vector = tqp_vector;
+ priv->ring[i + tqp_num].tqp_vector = tqp_vector;
+ tqp_vector->num_tqps++;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hnae3_ring_chain_node vector_ring_chain;
+
+ tqp_vector = &priv->tqp_vector[i];
+
+ tqp_vector->rx_group.total_bytes = 0;
+ tqp_vector->rx_group.total_packets = 0;
+ tqp_vector->tx_group.total_bytes = 0;
+ tqp_vector->tx_group.total_packets = 0;
+ tqp_vector->handle = h;
+
+ ret = hns3_get_vector_ring_chain(tqp_vector,
+ &vector_ring_chain);
+ if (ret)
+ goto map_ring_fail;
+
+ ret = h->ae_algo->ops->map_ring_to_vector(h,
+ tqp_vector->vector_irq, &vector_ring_chain);
+
+ hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+
+ if (ret)
+ goto map_ring_fail;
+
+ netif_napi_add(priv->netdev, &tqp_vector->napi,
+ hns3_nic_common_poll, NAPI_POLL_WEIGHT);
+ }
+
+ return 0;
+
+map_ring_fail:
+ while (i--)
+ netif_napi_del(&priv->tqp_vector[i].napi);
+
+ return ret;
+}
+
+static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
+{
+#define HNS3_VECTOR_PF_MAX_NUM 64
+
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hnae3_vector_info *vector;
+ struct pci_dev *pdev = h->pdev;
+ u16 tqp_num = h->kinfo.num_tqps;
+ u16 vector_num;
+ int ret = 0;
+ u16 i;
+
+ /* The RSS size, the number of online CPUs and vector_num should
+ * match; should consider 2p/4p systems later.
+ */
+ vector_num = min_t(u16, num_online_cpus(), tqp_num);
+ vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM);
+
+ vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
+ GFP_KERNEL);
+ if (!vector)
+ return -ENOMEM;
+
+ /* save the actual available vector number */
+ vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
+
+ priv->vector_num = vector_num;
+ priv->tqp_vector = (struct hns3_enet_tqp_vector *)
+ devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
+ GFP_KERNEL);
+ if (!priv->tqp_vector) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+ tqp_vector->idx = i;
+ tqp_vector->mask_addr = vector[i].io_addr;
+ tqp_vector->vector_irq = vector[i].vector;
+ hns3_vector_gl_rl_init(tqp_vector, priv);
+ }
+
+out:
+ devm_kfree(&pdev->dev, vector);
+ return ret;
+}
+
+static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
+{
+ group->ring = NULL;
+ group->count = 0;
+}
+
+static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_ring_chain_node vector_ring_chain;
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ int i;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ tqp_vector = &priv->tqp_vector[i];
+
+ if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring)
+ continue;
+
+ /* Since the mapping can be overwritten, if we fail to get the
+ * chain between a vector and its rings we should go on and deal
+ * with the remaining vectors.
+ */
+ if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+ dev_warn(priv->dev, "failed to get ring chain\n");
+
+ h->ae_algo->ops->unmap_ring_from_vector(h,
+ tqp_vector->vector_irq, &vector_ring_chain);
+
+ hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+
+ hns3_clear_ring_group(&tqp_vector->rx_group);
+ hns3_clear_ring_group(&tqp_vector->tx_group);
+ netif_napi_del(&priv->tqp_vector[i].napi);
+ }
+}
+
+static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct pci_dev *pdev = h->pdev;
+ int i, ret;
+
+ for (i = 0; i < priv->vector_num; i++) {
+ struct hns3_enet_tqp_vector *tqp_vector;
+
+ tqp_vector = &priv->tqp_vector[i];
+ ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
+ if (ret)
+ return;
+ }
+
+ devm_kfree(&pdev->dev, priv->tqp_vector);
+}
+
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+ unsigned int ring_type)
+{
+ int queue_num = priv->ae_handle->kinfo.num_tqps;
+ struct hns3_enet_ring *ring;
+ int desc_num;
+
+ if (ring_type == HNAE3_RING_TYPE_TX) {
+ ring = &priv->ring[q->tqp_index];
+ desc_num = priv->ae_handle->kinfo.num_tx_desc;
+ ring->queue_index = q->tqp_index;
+ } else {
+ ring = &priv->ring[q->tqp_index + queue_num];
+ desc_num = priv->ae_handle->kinfo.num_rx_desc;
+ ring->queue_index = q->tqp_index;
+ }
+
+ hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
+
+ ring->tqp = q;
+ ring->desc = NULL;
+ ring->desc_cb = NULL;
+ ring->dev = priv->dev;
+ ring->desc_dma_addr = 0;
+ ring->buf_size = q->buf_size;
+ ring->desc_num = desc_num;
+ ring->next_to_use = 0;
+ ring->next_to_clean = 0;
+ ring->last_to_use = 0;
+}
+
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
+ struct hns3_nic_priv *priv)
+{
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+ hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
+}
+
+static int hns3_get_ring_config(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct pci_dev *pdev = h->pdev;
+ int i;
+
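+ /* One TX and one RX ring per TQP: TX rings occupy indexes
+ * [0, num_tqps) and RX rings [num_tqps, 2 * num_tqps).
+ */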
+ priv->ring = devm_kzalloc(&pdev->dev,
+ array3_size(h->kinfo.num_tqps,
+ sizeof(*priv->ring), 2),
+ GFP_KERNEL);
+ if (!priv->ring)
+ return -ENOMEM;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++)
+ hns3_queue_to_ring(h->kinfo.tqp[i], priv);
+
+ return 0;
+}
+
+static void hns3_put_ring_config(struct hns3_nic_priv *priv)
+{
+ if (!priv->ring)
+ return;
+
+ devm_kfree(priv->dev, priv->ring);
+ priv->ring = NULL;
+}
+
+static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
+{
+ int ret;
+
+ if (ring->desc_num <= 0 || ring->buf_size <= 0)
+ return -EINVAL;
+
+ ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num,
+ sizeof(ring->desc_cb[0]), GFP_KERNEL);
+ if (!ring->desc_cb) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = hns3_alloc_desc(ring);
+ if (ret)
+ goto out_with_desc_cb;
+
+ if (!HNAE3_IS_TX_RING(ring)) {
+ ret = hns3_alloc_ring_buffers(ring);
+ if (ret)
+ goto out_with_desc;
+ }
+
+ return 0;
+
+out_with_desc:
+ hns3_free_desc(ring);
+out_with_desc_cb:
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
+ ring->desc_cb = NULL;
+out:
+ return ret;
+}
+
+void hns3_fini_ring(struct hns3_enet_ring *ring)
+{
+ hns3_free_desc(ring);
+ devm_kfree(ring_to_dev(ring), ring->desc_cb);
+ ring->desc_cb = NULL;
+ ring->next_to_clean = 0;
+ ring->next_to_use = 0;
+ ring->last_to_use = 0;
+ ring->pending_buf = 0;
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ }
+}
+
+static int hns3_buf_size2type(u32 buf_size)
+{
+ int bd_size_type;
+
+ switch (buf_size) {
+ case 512:
+ bd_size_type = HNS3_BD_SIZE_512_TYPE;
+ break;
+ case 1024:
+ bd_size_type = HNS3_BD_SIZE_1024_TYPE;
+ break;
+ case 2048:
+ bd_size_type = HNS3_BD_SIZE_2048_TYPE;
+ break;
+ case 4096:
+ bd_size_type = HNS3_BD_SIZE_4096_TYPE;
+ break;
+ default:
+ bd_size_type = HNS3_BD_SIZE_2048_TYPE;
+ }
+
+ return bd_size_type;
+}
+
+static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ struct hnae3_queue *q = ring->tqp;
+
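+ /* Program the 64-bit descriptor base address as two 32-bit halves;
+ * the high half is shifted in two steps so the expression is also
+ * valid when dma_addr_t is only 32 bits wide.
+ */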
+ if (!HNAE3_IS_TX_RING(ring)) {
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG, (u32)dma);
+ hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
+ hns3_buf_size2type(ring->buf_size));
+ hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
+ ring->desc_num / 8 - 1);
+
+ } else {
+ hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
+ (u32)dma);
+ hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
+ (u32)((dma >> 31) >> 1));
+
+ hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
+ ring->desc_num / 8 - 1);
+ }
+}
+
+static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+ int i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
+ int j;
+
+ if (!tc_info->enable)
+ continue;
+
+ for (j = 0; j < tc_info->tqp_count; j++) {
+ struct hnae3_queue *q;
+
+ q = priv->ring[tc_info->tqp_offset + j].tqp;
+ hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
+ tc_info->tc);
+ }
+ }
+}
+
+int hns3_init_all_ring(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int ring_num = h->kinfo.num_tqps * 2;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < ring_num; i++) {
+ ret = hns3_alloc_ring_memory(&priv->ring[i]);
+ if (ret) {
+ dev_err(priv->dev,
+ "Alloc ring memory fail! ret=%d\n", ret);
+ goto out_when_alloc_ring_memory;
+ }
+
+ u64_stats_init(&priv->ring[i].syncp);
+ }
+
+ return 0;
+
+out_when_alloc_ring_memory:
+ for (j = i - 1; j >= 0; j--)
+ hns3_fini_ring(&priv->ring[j]);
+
+ return -ENOMEM;
+}
+
+int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ hns3_fini_ring(&priv->ring[i]);
+ hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
+ }
+ return 0;
+}
+
+/* Set the MAC address if one is configured, otherwise leave it to the AE driver */
+static int hns3_init_mac_addr(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u8 mac_addr_temp[ETH_ALEN] = {0};
+ int ret = 0;
+
+ if (h->ae_algo->ops->get_mac_addr)
+ h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
+
+ /* Check if the MAC address is valid, if not get a random one */
+ if (!is_valid_ether_addr(mac_addr_temp)) {
+ eth_hw_addr_random(netdev);
+ dev_warn(priv->dev, "using random MAC address %pM\n",
+ netdev->dev_addr);
+ } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) {
+ ether_addr_copy(netdev->dev_addr, mac_addr_temp);
+ ether_addr_copy(netdev->perm_addr, mac_addr_temp);
+ } else {
+ return 0;
+ }
+
+ if (h->ae_algo->ops->set_mac_addr)
+ ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
+
+ return ret;
+}
+
+static int hns3_init_phy(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ int ret = 0;
+
+ if (h->ae_algo->ops->mac_connect_phy)
+ ret = h->ae_algo->ops->mac_connect_phy(h);
+
+ return ret;
+}
+
+static void hns3_uninit_phy(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->mac_disconnect_phy)
+ h->ae_algo->ops->mac_disconnect_phy(h);
+}
+
+static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->del_all_fd_entries)
+ h->ae_algo->ops->del_all_fd_entries(h, clear_list);
+}
+
+static int hns3_client_start(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_start)
+ return 0;
+
+ return handle->ae_algo->ops->client_start(handle);
+}
+
+static void hns3_client_stop(struct hnae3_handle *handle)
+{
+ if (!handle->ae_algo->ops->client_stop)
+ return;
+
+ handle->ae_algo->ops->client_stop(handle);
+}
+
+static void hns3_info_show(struct hns3_nic_priv *priv)
+{
+ struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo;
+
+ dev_info(priv->dev, "MAC address: %pM\n", priv->netdev->dev_addr);
+ dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps);
+ dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size);
+ dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size);
+ dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len);
+ dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc);
+ dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc);
+ dev_info(priv->dev, "Total number of enabled TCs: %u\n", kinfo->num_tc);
+ dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu);
+}
+
+static int hns3_client_init(struct hnae3_handle *handle)
+{
+ struct pci_dev *pdev = handle->pdev;
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ u16 alloc_tqps, max_rss_size;
+ struct hns3_nic_priv *priv;
+ struct net_device *netdev;
+ int ret;
+
+ handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps,
+ &max_rss_size);
+ netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv), alloc_tqps);
+ if (!netdev)
+ return -ENOMEM;
+
+ priv = netdev_priv(netdev);
+ priv->dev = &pdev->dev;
+ priv->netdev = netdev;
+ priv->ae_handle = handle;
+ priv->tx_timeout_count = 0;
+ priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num;
+ set_bit(HNS3_NIC_STATE_DOWN, &priv->state);
+
+ handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL);
+
+ handle->kinfo.netdev = netdev;
+ handle->priv = (void *)priv;
+
+ hns3_init_mac_addr(netdev);
+
+ hns3_set_default_feature(netdev);
+
+ netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+ netdev->netdev_ops = &hns3_nic_netdev_ops;
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+ hns3_ethtool_set_ops(netdev);
+
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ ret = hns3_get_ring_config(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_get_ring_cfg;
+ }
+
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_alloc_vector_data;
+ }
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_vector_data;
+ }
+
+ ret = hns3_init_all_ring(priv);
+ if (ret) {
+ ret = -ENOMEM;
+ goto out_init_ring;
+ }
+
+ ret = hns3_init_phy(netdev);
+ if (ret)
+ goto out_init_phy;
+
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto out_init_irq_fail;
+ }
+
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto out_client_start;
+ }
+
+ hns3_dcbnl_setup(handle);
+
+ hns3_dbg_init(handle);
+
+ /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */
+ netdev->max_mtu = HNS3_MAX_MTU;
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(priv->dev, "probe register netdev fail!\n");
+ goto out_reg_netdev_fail;
+ }
+
+ if (netif_msg_drv(handle))
+ hns3_info_show(priv);
+
+ return ret;
+
+out_reg_netdev_fail:
+ hns3_dbg_uninit(handle);
+out_client_start:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+out_init_irq_fail:
+ hns3_uninit_phy(netdev);
+out_init_phy:
+ hns3_uninit_all_ring(priv);
+out_init_ring:
+ hns3_nic_uninit_vector_data(priv);
+out_init_vector_data:
+ hns3_nic_dealloc_vector_data(priv);
+out_alloc_vector_data:
+ priv->ring = NULL;
+out_get_ring_cfg:
+ priv->ae_handle = NULL;
+ free_netdev(netdev);
+ return ret;
+}
+
+static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ if (netdev->reg_state != NETREG_UNINITIALIZED)
+ unregister_netdev(netdev);
+
+ hns3_client_stop(handle);
+
+ hns3_uninit_phy(netdev);
+
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ goto out_netdev_free;
+ }
+
+ hns3_free_rx_cpu_rmap(netdev);
+
+ hns3_nic_uninit_irq(priv);
+
+ hns3_del_all_fd_rules(netdev, true);
+
+ hns3_clear_all_ring(handle, true);
+
+ hns3_nic_uninit_vector_data(priv);
+
+ hns3_nic_dealloc_vector_data(priv);
+
+ ret = hns3_uninit_all_ring(priv);
+ if (ret)
+ netdev_err(netdev, "uninit ring error\n");
+
+ hns3_put_ring_config(priv);
+
+out_netdev_free:
+ hns3_dbg_uninit(handle);
+ free_netdev(netdev);
+}
+
+static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+
+ if (!netdev)
+ return;
+
+ if (linkup) {
+ netif_tx_wake_all_queues(netdev);
+ netif_carrier_on(netdev);
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link up\n");
+ } else {
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+ if (netif_msg_link(handle))
+ netdev_info(netdev, "link down\n");
+ }
+}
+
+static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct net_device *ndev = kinfo->netdev;
+
+ if (tc > HNAE3_MAX_TC)
+ return -EINVAL;
+
+ if (!ndev)
+ return -ENODEV;
+
+ return hns3_nic_set_real_num_queue(ndev);
+}
+
+static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
+{
+ while (ring->next_to_clean != ring->next_to_use) {
+ ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0;
+ hns3_free_buffer_detach(ring, ring->next_to_clean, 0);
+ ring_ptr_move_fw(ring, next_to_clean);
+ }
+
+ ring->pending_buf = 0;
+}
+
+static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ struct hns3_desc_cb res_cbs;
+ int ret;
+
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been freed in
+ * hns3_handle_rx_bd or will be freed by the stack, so we need
+ * to replace the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
+ if (ret) {
+ u64_stats_update_begin(&ring->syncp);
+ ring->stats.sw_err_cnt++;
+ u64_stats_update_end(&ring->syncp);
+ /* If allocating a new buffer fails, exit directly;
+ * the ring is cleared again in the up flow.
+ */
+ netdev_warn(ring_to_netdev(ring),
+ "reserve buffer map failed, ret = %d\n",
+ ret);
+ return ret;
+ }
+ hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
+ }
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+
+ /* Free the pending skb in rx ring */
+ if (ring->skb) {
+ dev_kfree_skb_any(ring->skb);
+ ring->skb = NULL;
+ ring->pending_buf = 0;
+ }
+
+ return 0;
+}
+
+static void hns3_force_clear_rx_ring(struct hns3_enet_ring *ring)
+{
+ while (ring->next_to_use != ring->next_to_clean) {
+ /* When a buffer is not reused, its memory has been freed in
+ * hns3_handle_rx_bd or will be freed by the stack, so we only
+ * need to unmap the buffer here.
+ */
+ if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
+ hns3_unmap_buffer(ring,
+ &ring->desc_cb[ring->next_to_use]);
+ ring->desc_cb[ring->next_to_use].dma = 0;
+ }
+
+ ring_ptr_move_fw(ring, next_to_use);
+ }
+}
+
+static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ u32 i;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ struct hns3_enet_ring *ring;
+
+ ring = &priv->ring[i];
+ hns3_clear_tx_ring(ring);
+
+ ring = &priv->ring[i + h->kinfo.num_tqps];
+ /* Continue to clear other rings even if clearing some
+ * rings failed.
+ */
+ if (force)
+ hns3_force_clear_rx_ring(ring);
+ else
+ hns3_clear_rx_ring(ring);
+ }
+}
+
+int hns3_nic_reset_all_ring(struct hnae3_handle *h)
+{
+ struct net_device *ndev = h->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hns3_enet_ring *rx_ring;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ ret = h->ae_algo->ops->reset_queue(h, i);
+ if (ret)
+ return ret;
+
+ hns3_init_ring_hw(&priv->ring[i]);
+
+ /* We need to clear the tx ring here because the self test
+ * uses the ring and does not go through the down flow
+ * before up.
+ */
+ hns3_clear_tx_ring(&priv->ring[i]);
+ priv->ring[i].next_to_clean = 0;
+ priv->ring[i].next_to_use = 0;
+ priv->ring[i].last_to_use = 0;
+
+ rx_ring = &priv->ring[i + h->kinfo.num_tqps];
+ hns3_init_ring_hw(rx_ring);
+ ret = hns3_clear_rx_ring(rx_ring);
+ if (ret)
+ return ret;
+
+ /* We cannot know the hardware head and tail when this
+ * function is called in the reset flow, so we reuse all
+ * descriptors.
+ */
+ for (j = 0; j < rx_ring->desc_num; j++)
+ hns3_reuse_buffer(rx_ring, j);
+
+ rx_ring->next_to_clean = 0;
+ rx_ring->next_to_use = 0;
+ }
+
+ hns3_init_tx_ring_tc(priv);
+
+ return 0;
+}
+
+static void hns3_store_coal(struct hns3_nic_priv *priv)
+{
+ /* ethtool only supports setting and querying one coalesce
+ * configuration for now, so save vector 0's coalesce configuration
+ * here in order to restore it.
+ */
+ memcpy(&priv->tx_coal, &priv->tqp_vector[0].tx_group.coal,
+ sizeof(struct hns3_enet_coalesce));
+ memcpy(&priv->rx_coal, &priv->tqp_vector[0].rx_group.coal,
+ sizeof(struct hns3_enet_coalesce));
+}
+
+static void hns3_restore_coal(struct hns3_nic_priv *priv)
+{
+ u16 vector_num = priv->vector_num;
+ int i;
+
+ for (i = 0; i < vector_num; i++) {
+ memcpy(&priv->tqp_vector[i].tx_group.coal, &priv->tx_coal,
+ sizeof(struct hns3_enet_coalesce));
+ memcpy(&priv->tqp_vector[i].rx_group.coal, &priv->rx_coal,
+ sizeof(struct hns3_enet_coalesce));
+ }
+}
+
+static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct net_device *ndev = kinfo->netdev;
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+
+ if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
+ return 0;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ return hns3_nic_net_stop(ndev);
+}
+
+static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
+ int ret = 0;
+
+ if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_err(kinfo->netdev, "device is not initialized yet\n");
+ return -EFAULT;
+ }
+
+ clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+
+ if (netif_running(kinfo->netdev)) {
+ ret = hns3_nic_net_open(kinfo->netdev);
+ if (ret) {
+ set_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+ netdev_err(kinfo->netdev,
+ "net up fail, ret=%d!\n", ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ /* Carrier off reporting is important to ethtool even BEFORE open */
+ netif_carrier_off(netdev);
+
+ ret = hns3_get_ring_config(priv);
+ if (ret)
+ return ret;
+
+ ret = hns3_nic_alloc_vector_data(priv);
+ if (ret)
+ goto err_put_ring;
+
+ hns3_restore_coal(priv);
+
+ ret = hns3_nic_init_vector_data(priv);
+ if (ret)
+ goto err_dealloc_vector;
+
+ ret = hns3_init_all_ring(priv);
+ if (ret)
+ goto err_uninit_vector;
+
+ /* the device can work without cpu rmap, only aRFS needs it */
+ ret = hns3_set_rx_cpu_rmap(netdev);
+ if (ret)
+ dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret);
+
+ ret = hns3_nic_init_irq(priv);
+ if (ret) {
+ dev_err(priv->dev, "init irq failed! ret=%d\n", ret);
+ hns3_free_rx_cpu_rmap(netdev);
+ goto err_init_irq_fail;
+ }
+
+ if (!hns3_is_phys_func(handle->pdev))
+ hns3_init_mac_addr(netdev);
+
+ ret = hns3_client_start(handle);
+ if (ret) {
+ dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret);
+ goto err_client_start_fail;
+ }
+
+ set_bit(HNS3_NIC_STATE_INITED, &priv->state);
+
+ return ret;
+
+err_client_start_fail:
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+err_init_irq_fail:
+ hns3_uninit_all_ring(priv);
+err_uninit_vector:
+ hns3_nic_uninit_vector_data(priv);
+err_dealloc_vector:
+ hns3_nic_dealloc_vector_data(priv);
+err_put_ring:
+ hns3_put_ring_config(priv);
+
+ return ret;
+}
+
+static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
+{
+ struct net_device *netdev = handle->kinfo.netdev;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ int ret;
+
+ if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) {
+ netdev_warn(netdev, "already uninitialized\n");
+ return 0;
+ }
+
+ hns3_free_rx_cpu_rmap(netdev);
+ hns3_nic_uninit_irq(priv);
+ hns3_clear_all_ring(handle, true);
+ hns3_reset_tx_queue(priv->ae_handle);
+
+ hns3_nic_uninit_vector_data(priv);
+
+ hns3_store_coal(priv);
+
+ hns3_nic_dealloc_vector_data(priv);
+
+ ret = hns3_uninit_all_ring(priv);
+ if (ret)
+ netdev_err(netdev, "uninit ring error\n");
+
+ hns3_put_ring_config(priv);
+
+ return ret;
+}
+
+static int hns3_reset_notify(struct hnae3_handle *handle,
+ enum hnae3_reset_notify_type type)
+{
+ int ret = 0;
+
+ switch (type) {
+ case HNAE3_UP_CLIENT:
+ ret = hns3_reset_notify_up_enet(handle);
+ break;
+ case HNAE3_DOWN_CLIENT:
+ ret = hns3_reset_notify_down_enet(handle);
+ break;
+ case HNAE3_INIT_CLIENT:
+ ret = hns3_reset_notify_init_enet(handle);
+ break;
+ case HNAE3_UNINIT_CLIENT:
+ ret = hns3_reset_notify_uninit_enet(handle);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int hns3_change_channels(struct hnae3_handle *handle, u32 new_tqp_num,
+ bool rxfh_configured)
+{
+ int ret;
+
+ ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num,
+ rxfh_configured);
+ if (ret) {
+ dev_err(&handle->pdev->dev,
+ "Change tqp num(%u) fail.\n", new_tqp_num);
+ return ret;
+ }
+
+ ret = hns3_reset_notify(handle, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(handle, HNAE3_UP_CLIENT);
+ if (ret)
+ hns3_reset_notify(handle, HNAE3_UNINIT_CLIENT);
+
+ return ret;
+}
+
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_knic_private_info *kinfo = &h->kinfo;
+ bool rxfh_configured = netif_is_rxfh_configured(netdev);
+ u32 new_tqp_num = ch->combined_count;
+ u16 org_tqp_num;
+ int ret;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (ch->rx_count || ch->tx_count)
+ return -EINVAL;
+
+ if (new_tqp_num > hns3_get_max_available_channels(h) ||
+ new_tqp_num < 1) {
+ dev_err(&netdev->dev,
+ "Change tqps fail, the tqp range is from 1 to %u",
+ hns3_get_max_available_channels(h));
+ return -EINVAL;
+ }
+
+ if (kinfo->rss_size == new_tqp_num)
+ return 0;
+
+ netif_dbg(h, drv, netdev,
+ "set channels: tqp_num=%u, rxfh=%d\n",
+ new_tqp_num, rxfh_configured);
+
+ ret = hns3_reset_notify(h, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hns3_reset_notify(h, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ org_tqp_num = h->kinfo.num_tqps;
+ ret = hns3_change_channels(h, new_tqp_num, rxfh_configured);
+ if (ret) {
+ int ret1;
+
+ netdev_warn(netdev,
+ "Change channels fail, revert to old value\n");
+ ret1 = hns3_change_channels(h, org_tqp_num, rxfh_configured);
+ if (ret1) {
+ netdev_err(netdev,
+ "revert to old channel fail\n");
+ return ret1;
+ }
+
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct hns3_hw_error_info hns3_hw_err[] = {
+ { .type = HNAE3_PPU_POISON_ERROR,
+ .msg = "PPU poison" },
+ { .type = HNAE3_CMDQ_ECC_ERROR,
+ .msg = "IMP CMDQ error" },
+ { .type = HNAE3_IMP_RD_POISON_ERROR,
+ .msg = "IMP RD poison" },
+ { .type = HNAE3_ROCEE_AXI_RESP_ERROR,
+ .msg = "ROCEE AXI RESP error" },
+};
+
+static void hns3_process_hw_error(struct hnae3_handle *handle,
+ enum hnae3_hw_error_type type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hns3_hw_err); i++) {
+ if (hns3_hw_err[i].type == type) {
+ dev_err(&handle->pdev->dev, "Detected %s!\n",
+ hns3_hw_err[i].msg);
+ break;
+ }
+ }
+}
+
+static const struct hnae3_client_ops client_ops = {
+ .init_instance = hns3_client_init,
+ .uninit_instance = hns3_client_uninit,
+ .link_status_change = hns3_link_status_change,
+ .setup_tc = hns3_client_setup_tc,
+ .reset_notify = hns3_reset_notify,
+ .process_hw_error = hns3_process_hw_error,
+};
+
+/* hns3_init_module - Driver registration routine
+ * hns3_init_module is the first routine called when the driver is
+ * loaded. It registers the hnae3 client and the PCI driver.
+ */
+static int __init hns3_init_module(void)
+{
+ int ret;
+
+ pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
+ pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
+
+ client.type = HNAE3_CLIENT_KNIC;
+ snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH, "%s",
+ hns3_driver_name);
+
+ client.ops = &client_ops;
+
+ INIT_LIST_HEAD(&client.node);
+
+ hns3_dbg_register_debugfs(hns3_driver_name);
+
+ ret = hnae3_register_client(&client);
+ if (ret)
+ goto err_reg_client;
+
+ ret = pci_register_driver(&hns3_driver);
+ if (ret)
+ goto err_reg_driver;
+
+ return ret;
+
+err_reg_driver:
+ hnae3_unregister_client(&client);
+err_reg_client:
+ hns3_dbg_unregister_debugfs();
+ return ret;
+}
+module_init(hns3_init_module);
+
+/* hns3_exit_module - Driver exit cleanup routine
+ * hns3_exit_module is called just before the driver is removed
+ * from memory.
+ */
+static void __exit hns3_exit_module(void)
+{
+ pci_unregister_driver(&hns3_driver);
+ hnae3_unregister_client(&client);
+ hns3_dbg_unregister_debugfs();
+}
+module_exit(hns3_exit_module);
+
+MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("pci:hns-nic");
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
new file mode 100644
index 000000000..54d02ea4a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -0,0 +1,614 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HNS3_ENET_H
+#define __HNS3_ENET_H
+
+#include <linux/if_vlan.h>
+
+#include "hnae3.h"
+
+enum hns3_nic_state {
+ HNS3_NIC_STATE_TESTING,
+ HNS3_NIC_STATE_RESETTING,
+ HNS3_NIC_STATE_INITED,
+ HNS3_NIC_STATE_DOWN,
+ HNS3_NIC_STATE_DISABLED,
+ HNS3_NIC_STATE_REMOVING,
+ HNS3_NIC_STATE_SERVICE_INITED,
+ HNS3_NIC_STATE_SERVICE_SCHED,
+ HNS3_NIC_STATE2_RESET_REQUESTED,
+ HNS3_NIC_STATE_MAX
+};
+
+#define HNS3_RING_RX_RING_BASEADDR_L_REG 0x00000
+#define HNS3_RING_RX_RING_BASEADDR_H_REG 0x00004
+#define HNS3_RING_RX_RING_BD_NUM_REG 0x00008
+#define HNS3_RING_RX_RING_BD_LEN_REG 0x0000C
+#define HNS3_RING_RX_RING_TAIL_REG 0x00018
+#define HNS3_RING_RX_RING_HEAD_REG 0x0001C
+#define HNS3_RING_RX_RING_FBDNUM_REG 0x00020
+#define HNS3_RING_RX_RING_PKTNUM_RECORD_REG 0x0002C
+
+#define HNS3_RING_TX_RING_BASEADDR_L_REG 0x00040
+#define HNS3_RING_TX_RING_BASEADDR_H_REG 0x00044
+#define HNS3_RING_TX_RING_BD_NUM_REG 0x00048
+#define HNS3_RING_TX_RING_TC_REG 0x00050
+#define HNS3_RING_TX_RING_TAIL_REG 0x00058
+#define HNS3_RING_TX_RING_HEAD_REG 0x0005C
+#define HNS3_RING_TX_RING_FBDNUM_REG 0x00060
+#define HNS3_RING_TX_RING_OFFSET_REG 0x00064
+#define HNS3_RING_TX_RING_EBDNUM_REG 0x00068
+#define HNS3_RING_TX_RING_PKTNUM_RECORD_REG 0x0006C
+#define HNS3_RING_TX_RING_EBD_OFFSET_REG 0x00070
+#define HNS3_RING_TX_RING_BD_ERR_REG 0x00074
+#define HNS3_RING_EN_REG 0x00090
+#define HNS3_RING_RX_EN_REG 0x00098
+#define HNS3_RING_TX_EN_REG 0x000D4
+
+#define HNS3_RX_HEAD_SIZE 256
+
+#define HNS3_TX_TIMEOUT (5 * HZ)
+#define HNS3_RING_NAME_LEN 16
+#define HNS3_BUFFER_SIZE_2048 2048
+#define HNS3_RING_MAX_PENDING 32760
+#define HNS3_RING_MIN_PENDING 72
+#define HNS3_RING_BD_MULTIPLE 8
+/* max frame size of mac */
+#define HNS3_MAC_MAX_FRAME 9728
+#define HNS3_MAX_MTU \
+ (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN))
+
+#define HNS3_BD_SIZE_512_TYPE 0
+#define HNS3_BD_SIZE_1024_TYPE 1
+#define HNS3_BD_SIZE_2048_TYPE 2
+#define HNS3_BD_SIZE_4096_TYPE 3
+
+#define HNS3_RX_FLAG_VLAN_PRESENT 0x1
+#define HNS3_RX_FLAG_L3ID_IPV4 0x0
+#define HNS3_RX_FLAG_L3ID_IPV6 0x1
+#define HNS3_RX_FLAG_L4ID_UDP 0x0
+#define HNS3_RX_FLAG_L4ID_TCP 0x1
+
+#define HNS3_RXD_DMAC_S 0
+#define HNS3_RXD_DMAC_M (0x3 << HNS3_RXD_DMAC_S)
+#define HNS3_RXD_VLAN_S 2
+#define HNS3_RXD_VLAN_M (0x3 << HNS3_RXD_VLAN_S)
+#define HNS3_RXD_L3ID_S 4
+#define HNS3_RXD_L3ID_M (0xf << HNS3_RXD_L3ID_S)
+#define HNS3_RXD_L4ID_S 8
+#define HNS3_RXD_L4ID_M (0xf << HNS3_RXD_L4ID_S)
+#define HNS3_RXD_FRAG_B 12
+#define HNS3_RXD_STRP_TAGP_S 13
+#define HNS3_RXD_STRP_TAGP_M (0x3 << HNS3_RXD_STRP_TAGP_S)
+
+#define HNS3_RXD_L2E_B 16
+#define HNS3_RXD_L3E_B 17
+#define HNS3_RXD_L4E_B 18
+#define HNS3_RXD_TRUNCAT_B 19
+#define HNS3_RXD_HOI_B 20
+#define HNS3_RXD_DOI_B 21
+#define HNS3_RXD_OL3E_B 22
+#define HNS3_RXD_OL4E_B 23
+#define HNS3_RXD_GRO_COUNT_S 24
+#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
+#define HNS3_RXD_GRO_FIXID_B 30
+#define HNS3_RXD_GRO_ECN_B 31
+
+#define HNS3_RXD_ODMAC_S 0
+#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
+#define HNS3_RXD_OVLAN_S 2
+#define HNS3_RXD_OVLAN_M (0x3 << HNS3_RXD_OVLAN_S)
+#define HNS3_RXD_OL3ID_S 4
+#define HNS3_RXD_OL3ID_M (0xf << HNS3_RXD_OL3ID_S)
+#define HNS3_RXD_OL4ID_S 8
+#define HNS3_RXD_OL4ID_M (0xf << HNS3_RXD_OL4ID_S)
+#define HNS3_RXD_FBHI_S 12
+#define HNS3_RXD_FBHI_M (0x3 << HNS3_RXD_FBHI_S)
+#define HNS3_RXD_FBLI_S 14
+#define HNS3_RXD_FBLI_M (0x3 << HNS3_RXD_FBLI_S)
+
+#define HNS3_RXD_BDTYPE_S 0
+#define HNS3_RXD_BDTYPE_M (0xf << HNS3_RXD_BDTYPE_S)
+#define HNS3_RXD_VLD_B 4
+#define HNS3_RXD_UDP0_B 5
+#define HNS3_RXD_EXTEND_B 7
+#define HNS3_RXD_FE_B 8
+#define HNS3_RXD_LUM_B 9
+#define HNS3_RXD_CRCP_B 10
+#define HNS3_RXD_L3L4P_B 11
+#define HNS3_RXD_TSIND_S 12
+#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
+#define HNS3_RXD_LKBK_B 15
+#define HNS3_RXD_GRO_SIZE_S 16
+#define HNS3_RXD_GRO_SIZE_M (0x3fff << HNS3_RXD_GRO_SIZE_S)
+
+#define HNS3_TXD_L3T_S 0
+#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
+#define HNS3_TXD_L4T_S 2
+#define HNS3_TXD_L4T_M (0x3 << HNS3_TXD_L4T_S)
+#define HNS3_TXD_L3CS_B 4
+#define HNS3_TXD_L4CS_B 5
+#define HNS3_TXD_VLAN_B 6
+#define HNS3_TXD_TSO_B 7
+
+#define HNS3_TXD_L2LEN_S 8
+#define HNS3_TXD_L2LEN_M (0xff << HNS3_TXD_L2LEN_S)
+#define HNS3_TXD_L3LEN_S 16
+#define HNS3_TXD_L3LEN_M (0xff << HNS3_TXD_L3LEN_S)
+#define HNS3_TXD_L4LEN_S 24
+#define HNS3_TXD_L4LEN_M (0xff << HNS3_TXD_L4LEN_S)
+
+#define HNS3_TXD_OL3T_S 0
+#define HNS3_TXD_OL3T_M (0x3 << HNS3_TXD_OL3T_S)
+#define HNS3_TXD_OVLAN_B 2
+#define HNS3_TXD_MACSEC_B 3
+#define HNS3_TXD_TUNTYPE_S 4
+#define HNS3_TXD_TUNTYPE_M (0xf << HNS3_TXD_TUNTYPE_S)
+
+#define HNS3_TXD_BDTYPE_S 0
+#define HNS3_TXD_BDTYPE_M (0xf << HNS3_TXD_BDTYPE_S)
+#define HNS3_TXD_FE_B 4
+#define HNS3_TXD_SC_S 5
+#define HNS3_TXD_SC_M (0x3 << HNS3_TXD_SC_S)
+#define HNS3_TXD_EXTEND_B 7
+#define HNS3_TXD_VLD_B 8
+#define HNS3_TXD_RI_B 9
+#define HNS3_TXD_RA_B 10
+#define HNS3_TXD_TSYN_B 11
+#define HNS3_TXD_DECTTL_S 12
+#define HNS3_TXD_DECTTL_M (0xf << HNS3_TXD_DECTTL_S)
+
+#define HNS3_TXD_MSS_S 0
+#define HNS3_TXD_MSS_M (0x3fff << HNS3_TXD_MSS_S)
+
+#define HNS3_VECTOR_TX_IRQ BIT_ULL(0)
+#define HNS3_VECTOR_RX_IRQ BIT_ULL(1)
+
+#define HNS3_VECTOR_NOT_INITED 0
+#define HNS3_VECTOR_INITED 1
+
+#define HNS3_MAX_BD_SIZE 65535
+#define HNS3_MAX_TSO_BD_NUM 63U
+#define HNS3_MAX_TSO_SIZE 1048576U
+#define HNS3_MAX_NON_TSO_SIZE 9728U
+
+#define HNS3_VECTOR_GL0_OFFSET 0x100
+#define HNS3_VECTOR_GL1_OFFSET 0x200
+#define HNS3_VECTOR_GL2_OFFSET 0x300
+#define HNS3_VECTOR_RL_OFFSET 0x900
+#define HNS3_VECTOR_RL_EN_B 6
+
+#define HNS3_RING_EN_B 0
+
+enum hns3_pkt_l2t_type {
+ HNS3_L2_TYPE_UNICAST,
+ HNS3_L2_TYPE_MULTICAST,
+ HNS3_L2_TYPE_BROADCAST,
+ HNS3_L2_TYPE_INVALID,
+};
+
+enum hns3_pkt_l3t_type {
+ HNS3_L3T_NONE,
+ HNS3_L3T_IPV6,
+ HNS3_L3T_IPV4,
+ HNS3_L3T_RESERVED
+};
+
+enum hns3_pkt_l4t_type {
+ HNS3_L4T_UNKNOWN,
+ HNS3_L4T_TCP,
+ HNS3_L4T_UDP,
+ HNS3_L4T_SCTP
+};
+
+enum hns3_pkt_ol3t_type {
+ HNS3_OL3T_NONE,
+ HNS3_OL3T_IPV6,
+ HNS3_OL3T_IPV4_NO_CSUM,
+ HNS3_OL3T_IPV4_CSUM
+};
+
+enum hns3_pkt_tun_type {
+ HNS3_TUN_NONE,
+ HNS3_TUN_MAC_IN_UDP,
+ HNS3_TUN_NVGRE,
+ HNS3_TUN_OTHER
+};
+
+/* hardware spec ring buffer format */
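+/* Each descriptor is 32 bytes: a 64-bit buffer address followed by a
+ * TX/RX union of control/status fields.
+ */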
+struct __packed hns3_desc {
+ __le64 addr;
+ union {
+ struct {
+ __le16 vlan_tag;
+ __le16 send_size;
+ union {
+ __le32 type_cs_vlan_tso_len;
+ struct {
+ __u8 type_cs_vlan_tso;
+ __u8 l2_len;
+ __u8 l3_len;
+ __u8 l4_len;
+ };
+ };
+ __le16 outer_vlan_tag;
+ __le16 tv;
+
+ union {
+ __le32 ol_type_vlan_len_msec;
+ struct {
+ __u8 ol_type_vlan_msec;
+ __u8 ol2_len;
+ __u8 ol3_len;
+ __u8 ol4_len;
+ };
+ };
+
+ __le32 paylen;
+ __le16 bdtp_fe_sc_vld_ra_ri;
+ __le16 mss;
+ } tx;
+
+ struct {
+ __le32 l234_info;
+ __le16 pkt_len;
+ __le16 size;
+
+ __le32 rss_hash;
+ __le16 fd_id;
+ __le16 vlan_tag;
+
+ union {
+ __le32 ol_info;
+ struct {
+ __le16 o_dm_vlan_id_fb;
+ __le16 ot_vlan_tag;
+ };
+ };
+
+ __le32 bd_base_info;
+ } rx;
+ };
+};
+
+struct hns3_desc_cb {
+ dma_addr_t dma; /* dma address of this desc */
+ void *buf; /* cpu addr for a desc */
+
+ /* priv data for the desc, e.g. the skb when used with the ip stack */
+ void *priv;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
+
+ u16 reuse_flag;
+ u16 refill;
+
+ /* desc type, used by the ring user to mark the type of the priv data */
+ u16 type;
+ u16 pagecnt_bias;
+};
+
+enum hns3_pkt_l3type {
+ HNS3_L3_TYPE_IPV4,
+ HNS3_L3_TYPE_IPV6,
+ HNS3_L3_TYPE_ARP,
+ HNS3_L3_TYPE_RARP,
+ HNS3_L3_TYPE_IPV4_OPT,
+ HNS3_L3_TYPE_IPV6_EXT,
+ HNS3_L3_TYPE_LLDP,
+ HNS3_L3_TYPE_BPDU,
+ HNS3_L3_TYPE_MAC_PAUSE,
+ HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */
+
+ /* reserved for 0xA~0xB */
+
+ HNS3_L3_TYPE_CNM = 0xc,
+
+ /* reserved for 0xD~0xE */
+
+ HNS3_L3_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_l4type {
+ HNS3_L4_TYPE_UDP,
+ HNS3_L4_TYPE_TCP,
+ HNS3_L4_TYPE_GRE,
+ HNS3_L4_TYPE_SCTP,
+ HNS3_L4_TYPE_IGMP,
+ HNS3_L4_TYPE_ICMP,
+
+ /* reserved for 0x6~0xE */
+
+ HNS3_L4_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_ol3type {
+ HNS3_OL3_TYPE_IPV4 = 0,
+ HNS3_OL3_TYPE_IPV6,
+ /* reserved for 0x2~0x3 */
+ HNS3_OL3_TYPE_IPV4_OPT = 4,
+ HNS3_OL3_TYPE_IPV6_EXT,
+
+ /* reserved for 0x6~0xE */
+
+ HNS3_OL3_TYPE_PARSE_FAIL = 0xf /* must be last */
+};
+
+enum hns3_pkt_ol4type {
+ HNS3_OL4_TYPE_NO_TUN,
+ HNS3_OL4_TYPE_MAC_IN_UDP,
+ HNS3_OL4_TYPE_NVGRE,
+ HNS3_OL4_TYPE_UNKNOWN
+};
+
+struct ring_stats {
+ u64 sw_err_cnt;
+ u64 seg_pkt_cnt;
+ union {
+ struct {
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u64 tx_more;
+ u64 restart_queue;
+ u64 tx_busy;
+ u64 tx_copy;
+ u64 tx_vlan_err;
+ u64 tx_l4_proto_err;
+ u64 tx_l2l3l4_err;
+ u64 tx_tso_err;
+ u64 over_max_recursion;
+ u64 hw_limitation;
+ };
+ struct {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 rx_err_cnt;
+ u64 reuse_pg_cnt;
+ u64 err_pkt_len;
+ u64 err_bd_num;
+ u64 l2_err;
+ u64 l3l4_csum_err;
+ u64 rx_multicast;
+ u64 non_reuse_pg;
+ };
+ };
+};
+
+struct hns3_enet_ring {
+ struct hns3_desc *desc; /* dma map address space */
+ struct hns3_desc_cb *desc_cb;
+ struct hns3_enet_ring *next;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ struct hnae3_queue *tqp;
+ int queue_index;
+ struct device *dev; /* will be used for DMA mapping of descriptors */
+
+ /* statistics */
+ struct ring_stats stats;
+ struct u64_stats_sync syncp;
+
+ dma_addr_t desc_dma_addr;
+ u32 buf_size; /* size for hnae_desc->addr, preset by AE */
+ u16 desc_num; /* total number of desc */
+ int next_to_use; /* idx of next spare desc */
+
+ /* idx of the latest sent desc; the ring is empty when it is equal to
+ * next_to_use
+ */
+ int next_to_clean;
+ union {
+ int last_to_use; /* last idx used by xmit */
+ u32 pull_len; /* memcpy len for current rx packet */
+ };
+ u32 frag_num;
+ void *va; /* first buffer address for current packet */
+
+ u32 flag; /* ring attribute */
+
+ int pending_buf;
+ struct sk_buff *skb;
+ struct sk_buff *tail_skb;
+} ____cacheline_internodealigned_in_smp;
+
+enum hns3_flow_level_range {
+ HNS3_FLOW_LOW = 0,
+ HNS3_FLOW_MID = 1,
+ HNS3_FLOW_HIGH = 2,
+ HNS3_FLOW_ULTRA = 3,
+};
+
+#define HNS3_INT_GL_MAX 0x1FE0
+#define HNS3_INT_GL_50K 0x0014
+#define HNS3_INT_GL_20K 0x0032
+#define HNS3_INT_GL_18K 0x0036
+#define HNS3_INT_GL_8K 0x007C
+
+#define HNS3_INT_RL_MAX 0x00EC
+#define HNS3_INT_RL_ENABLE_MASK 0x40
+
+struct hns3_enet_coalesce {
+ u16 int_gl;
+ u8 gl_adapt_enable;
+ enum hns3_flow_level_range flow_level;
+};
+
+struct hns3_enet_ring_group {
+ /* array of pointers to rings */
+ struct hns3_enet_ring *ring;
+ u64 total_bytes; /* total bytes processed this group */
+ u64 total_packets; /* total packets processed this group */
+ u16 count;
+ struct hns3_enet_coalesce coal;
+};
+
+struct hns3_enet_tqp_vector {
+ struct hnae3_handle *handle;
+ u8 __iomem *mask_addr;
+ int vector_irq;
+ int irq_init_flag;
+
+ u16 idx; /* index in the TQP vector array per handle. */
+
+ struct napi_struct napi;
+
+ struct hns3_enet_ring_group rx_group;
+ struct hns3_enet_ring_group tx_group;
+
+ cpumask_t affinity_mask;
+ u16 num_tqps; /* total number of tqps in TQP vector */
+ struct irq_affinity_notify affinity_notify;
+
+ char name[HNAE3_INT_NAME_LEN];
+
+ unsigned long last_jiffies;
+} ____cacheline_internodealigned_in_smp;
+
+struct hns3_nic_priv {
+ struct hnae3_handle *ae_handle;
+ struct net_device *netdev;
+ struct device *dev;
+
+ /**
+ * the cb for nic to manage the ring buffer; the first half of the
+ * array is for tx_ring and the second half is for rx_ring
+ */
+ struct hns3_enet_ring *ring;
+ struct hns3_enet_tqp_vector *tqp_vector;
+ u16 vector_num;
+ u8 max_non_tso_bd_num;
+
+ u64 tx_timeout_count;
+
+ unsigned long state;
+
+ struct hns3_enet_coalesce tx_coal;
+ struct hns3_enet_coalesce rx_coal;
+};
+
+union l3_hdr_info {
+ struct iphdr *v4;
+ struct ipv6hdr *v6;
+ unsigned char *hdr;
+};
+
+union l4_hdr_info {
+ struct tcphdr *tcp;
+ struct udphdr *udp;
+ struct gre_base_hdr *gre;
+ unsigned char *hdr;
+};
+
+struct hns3_hw_error_info {
+ enum hnae3_hw_error_type type;
+ const char *msg;
+};
+
+static inline int ring_space(struct hns3_enet_ring *ring)
+{
+ /* This smp_load_acquire() pairs with smp_store_release() in
+ * hns3_nic_reclaim_one_desc called by hns3_clean_tx_ring.
+ */
+ int begin = smp_load_acquire(&ring->next_to_clean);
+ int end = READ_ONCE(ring->next_to_use);
+
+ return ((end >= begin) ? (ring->desc_num - end + begin) :
+ (begin - end)) - 1;
+}
+
+static inline u32 hns3_read_reg(void __iomem *base, u32 reg)
+{
+ return readl(base + reg);
+}
+
+static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ writel(value, reg_addr + reg);
+}
+
+#define hns3_read_dev(a, reg) \
+ hns3_read_reg((a)->io_base, (reg))
+
+static inline bool hns3_nic_resetting(struct net_device *netdev)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+
+ return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state);
+}
+
+#define hns3_write_dev(a, reg, value) \
+ hns3_write_reg((a)->io_base, (reg), (value))
+
+#define ring_to_dev(ring) ((ring)->dev)
+
+#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)
+
+#define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
+ DMA_TO_DEVICE : DMA_FROM_DEVICE)
+
+#define hns3_buf_size(_ring) ((_ring)->buf_size)
+
+static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
+{
+#if (PAGE_SIZE < 8192)
+ if (ring->buf_size > (PAGE_SIZE / 2))
+ return 1;
+#endif
+ return 0;
+}
+
+#define hns3_page_size(_ring) (PAGE_SIZE << hns3_page_order(_ring))
+
+/* iterator for handling rings in ring group */
+#define hns3_for_each_ring(pos, head) \
+ for (pos = (head).ring; pos; pos = pos->next)
+
+#define hns3_get_handle(ndev) \
+ (((struct hns3_nic_priv *)netdev_priv(ndev))->ae_handle)
+
+#define hns3_gl_usec_to_reg(int_gl) (int_gl >> 1)
+#define hns3_gl_round_down(int_gl) round_down(int_gl, 2)
+
+#define hns3_rl_usec_to_reg(int_rl) (int_rl >> 2)
+#define hns3_rl_round_down(int_rl) round_down(int_rl, 4)
+
+void hns3_ethtool_set_ops(struct net_device *netdev);
+int hns3_set_channels(struct net_device *netdev,
+ struct ethtool_channels *ch);
+
+void hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget);
+int hns3_init_all_ring(struct hns3_nic_priv *priv);
+int hns3_uninit_all_ring(struct hns3_nic_priv *priv);
+int hns3_nic_reset_all_ring(struct hnae3_handle *h);
+void hns3_fini_ring(struct hns3_enet_ring *ring);
+netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev);
+bool hns3_is_phys_func(struct pci_dev *pdev);
+int hns3_clean_rx_ring(
+ struct hns3_enet_ring *ring, int budget,
+ void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *));
+
+void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 gl_value);
+void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
+ u32 rl_value);
+
+void hns3_enable_vlan_filter(struct net_device *netdev, bool enable);
+void hns3_request_update_promisc_mode(struct hnae3_handle *handle);
+
+#ifdef CONFIG_HNS3_DCB
+void hns3_dcbnl_setup(struct hnae3_handle *handle);
+#else
+static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {}
+#endif
+
+void hns3_dbg_init(struct hnae3_handle *handle);
+void hns3_dbg_uninit(struct hnae3_handle *handle);
+void hns3_dbg_register_debugfs(const char *debugfs_dir_name);
+void hns3_dbg_unregister_debugfs(void);
+void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
new file mode 100644
index 000000000..d35f4b2b4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -0,0 +1,1579 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/string.h>
+#include <linux/phy.h>
+#include <linux/sfp.h>
+
+#include "hns3_enet.h"
+
+struct hns3_stats {
+ char stats_string[ETH_GSTRING_LEN];
+ int stats_offset;
+};
+
+struct hns3_sfp_type {
+ u8 type;
+ u8 ext_type;
+};
+
+/* tqp related stats */
+#define HNS3_TQP_STAT(_string, _member) { \
+ .stats_string = _string, \
+ .stats_offset = offsetof(struct hns3_enet_ring, stats) +\
+ offsetof(struct ring_stats, _member), \
+}
+
+static const struct hns3_stats hns3_txq_stats[] = {
+ /* Tx per-queue statistics */
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", tx_pkts),
+ HNS3_TQP_STAT("bytes", tx_bytes),
+ HNS3_TQP_STAT("more", tx_more),
+ HNS3_TQP_STAT("wake", restart_queue),
+ HNS3_TQP_STAT("busy", tx_busy),
+ HNS3_TQP_STAT("copy", tx_copy),
+ HNS3_TQP_STAT("vlan_err", tx_vlan_err),
+ HNS3_TQP_STAT("l4_proto_err", tx_l4_proto_err),
+ HNS3_TQP_STAT("l2l3l4_err", tx_l2l3l4_err),
+ HNS3_TQP_STAT("tso_err", tx_tso_err),
+ HNS3_TQP_STAT("over_max_recursion", over_max_recursion),
+ HNS3_TQP_STAT("hw_limitation", hw_limitation),
+};
+
+#define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
+
+static const struct hns3_stats hns3_rxq_stats[] = {
+ /* Rx per-queue statistics */
+ HNS3_TQP_STAT("dropped", sw_err_cnt),
+ HNS3_TQP_STAT("seg_pkt_cnt", seg_pkt_cnt),
+ HNS3_TQP_STAT("packets", rx_pkts),
+ HNS3_TQP_STAT("bytes", rx_bytes),
+ HNS3_TQP_STAT("errors", rx_err_cnt),
+ HNS3_TQP_STAT("reuse_pg_cnt", reuse_pg_cnt),
+ HNS3_TQP_STAT("err_pkt_len", err_pkt_len),
+ HNS3_TQP_STAT("err_bd_num", err_bd_num),
+ HNS3_TQP_STAT("l2_err", l2_err),
+ HNS3_TQP_STAT("l3l4_csum_err", l3l4_csum_err),
+ HNS3_TQP_STAT("multicast", rx_multicast),
+ HNS3_TQP_STAT("non_reuse_pg", non_reuse_pg),
+};
+
+#define HNS3_RXQ_STATS_COUNT ARRAY_SIZE(hns3_rxq_stats)
+
+#define HNS3_TQP_STATS_COUNT (HNS3_TXQ_STATS_COUNT + HNS3_RXQ_STATS_COUNT)
+
+#define HNS3_SELF_TEST_TYPE_NUM 4
+#define HNS3_NIC_LB_TEST_PKT_NUM 1
+#define HNS3_NIC_LB_TEST_RING_ID 0
+#define HNS3_NIC_LB_TEST_PACKET_SIZE 128
+#define HNS3_NIC_LB_SETUP_USEC 10000
+
+/* Nic loopback test err */
+#define HNS3_NIC_LB_TEST_NO_MEM_ERR 1
+#define HNS3_NIC_LB_TEST_TX_CNT_ERR 2
+#define HNS3_NIC_LB_TEST_RX_CNT_ERR 3
+
+static int hns3_lp_setup(struct net_device *ndev, enum hnae3_loop loop, bool en)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+ bool vlan_filter_enable;
+ int ret;
+
+ if (!h->ae_algo->ops->set_loopback ||
+ !h->ae_algo->ops->set_promisc_mode)
+ return -EOPNOTSUPP;
+
+ switch (loop) {
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ case HNAE3_LOOP_APP:
+ case HNAE3_LOOP_PHY:
+ ret = h->ae_algo->ops->set_loopback(h, loop, en);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ break;
+ }
+
+ if (ret || ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ return ret;
+
+ if (en) {
+ h->ae_algo->ops->set_promisc_mode(h, true, true);
+ } else {
+ /* recover promisc mode before loopback test */
+ hns3_request_update_promisc_mode(h);
+ vlan_filter_enable = ndev->flags & IFF_PROMISC ? false : true;
+ hns3_enable_vlan_filter(ndev, vlan_filter_enable);
+ }
+
+ return ret;
+}
+
+static int hns3_lp_up(struct net_device *ndev, enum hnae3_loop loop_mode)
+{
+ struct hnae3_handle *h = hns3_get_handle(ndev);
+ int ret;
+
+ ret = hns3_nic_reset_all_ring(h);
+ if (ret)
+ return ret;
+
+ ret = hns3_lp_setup(ndev, loop_mode, true);
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
+
+ return ret;
+}
+
+static int hns3_lp_down(struct net_device *ndev, enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hns3_lp_setup(ndev, loop_mode, false);
+ if (ret) {
+ netdev_err(ndev, "lb_setup return error: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(HNS3_NIC_LB_SETUP_USEC, HNS3_NIC_LB_SETUP_USEC * 2);
+
+ return 0;
+}
+
+static void hns3_lp_setup_skb(struct sk_buff *skb)
+{
+#define HNS3_NIC_LB_DST_MAC_ADDR 0x1f
+
+ struct net_device *ndev = skb->dev;
+ struct hnae3_handle *handle;
+ struct hnae3_ae_dev *ae_dev;
+ unsigned char *packet;
+ struct ethhdr *ethh;
+ unsigned int i;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ ethh = skb_put(skb, sizeof(struct ethhdr));
+ packet = skb_put(skb, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ memcpy(ethh->h_dest, ndev->dev_addr, ETH_ALEN);
+
+ /* The dst mac addr of the loopback packet is the same as the host's
+ * mac addr; the SSU component may loop the packet back to the host
+ * before it reaches the mac or serdes, which would defeat the
+ * purpose of the mac or serdes selftest.
+ */
+ handle = hns3_get_handle(ndev);
+ ae_dev = pci_get_drvdata(handle->pdev);
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ ethh->h_dest[5] += HNS3_NIC_LB_DST_MAC_ADDR;
+ eth_zero_addr(ethh->h_source);
+ ethh->h_proto = htons(ETH_P_ARP);
+ skb_reset_mac_header(skb);
+
+ for (i = 0; i < HNS3_NIC_LB_TEST_PACKET_SIZE; i++)
+ packet[i] = (unsigned char)(i & 0xff);
+}
+
+static void hns3_lb_check_skb_data(struct hns3_enet_ring *ring,
+ struct sk_buff *skb)
+{
+ struct hns3_enet_tqp_vector *tqp_vector = ring->tqp_vector;
+ unsigned char *packet = skb->data;
+ u32 len = skb_headlen(skb);
+ u32 i;
+
+ len = min_t(u32, len, HNS3_NIC_LB_TEST_PACKET_SIZE);
+
+ for (i = 0; i < len; i++)
+ if (packet[i] != (unsigned char)(i & 0xff))
+ break;
+
+ /* The packet is correctly received */
+ if (i == HNS3_NIC_LB_TEST_PACKET_SIZE)
+ tqp_vector->rx_group.total_packets++;
+ else
+ print_hex_dump(KERN_ERR, "selftest:", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, len, true);
+
+ dev_kfree_skb_any(skb);
+}
+
+static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hnae3_knic_private_info *kinfo;
+ u32 i, rcv_good_pkt_total = 0;
+
+ kinfo = &h->kinfo;
+ for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
+ struct hns3_enet_ring *ring = &priv->ring[i];
+ struct hns3_enet_ring_group *rx_group;
+ u64 pre_rx_pkt;
+
+ rx_group = &ring->tqp_vector->rx_group;
+ pre_rx_pkt = rx_group->total_packets;
+
+ preempt_disable();
+ hns3_clean_rx_ring(ring, budget, hns3_lb_check_skb_data);
+ preempt_enable();
+
+ rcv_good_pkt_total += (rx_group->total_packets - pre_rx_pkt);
+ rx_group->total_packets = pre_rx_pkt;
+ }
+ return rcv_good_pkt_total;
+}
+
+static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
+ u32 end_ringid, u32 budget)
+{
+ u32 i;
+
+ for (i = start_ringid; i <= end_ringid; i++) {
+ struct hns3_enet_ring *ring = &priv->ring[i];
+
+ hns3_clean_tx_ring(ring, 0);
+ }
+}
+
+/**
+ * hns3_lp_run_test - run loopback test
+ * @ndev: net device
+ * @mode: loopback type
+ */
+static int hns3_lp_run_test(struct net_device *ndev, enum hnae3_loop mode)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct sk_buff *skb;
+ u32 i, good_cnt;
+ int ret_val = 0;
+
+ skb = alloc_skb(HNS3_NIC_LB_TEST_PACKET_SIZE + ETH_HLEN + NET_IP_ALIGN,
+ GFP_KERNEL);
+ if (!skb)
+ return HNS3_NIC_LB_TEST_NO_MEM_ERR;
+
+ skb->dev = ndev;
+ hns3_lp_setup_skb(skb);
+ skb->queue_mapping = HNS3_NIC_LB_TEST_RING_ID;
+
+ good_cnt = 0;
+ for (i = 0; i < HNS3_NIC_LB_TEST_PKT_NUM; i++) {
+ netdev_tx_t tx_ret;
+
+ skb_get(skb);
+ tx_ret = hns3_nic_net_xmit(skb, ndev);
+ if (tx_ret == NETDEV_TX_OK) {
+ good_cnt++;
+ } else {
+ kfree_skb(skb);
+ netdev_err(ndev, "hns3_lb_run_test xmit failed: %d\n",
+ tx_ret);
+ }
+ }
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_TX_CNT_ERR;
+ netdev_err(ndev, "mode %d sent fail, cnt=0x%x, budget=0x%x\n",
+ mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
+ goto out;
+ }
+
+ /* Allow 200 milliseconds for packets to go from Tx to Rx */
+ msleep(200);
+
+ good_cnt = hns3_lb_check_rx_ring(priv, HNS3_NIC_LB_TEST_PKT_NUM);
+ if (good_cnt != HNS3_NIC_LB_TEST_PKT_NUM) {
+ ret_val = HNS3_NIC_LB_TEST_RX_CNT_ERR;
+ netdev_err(ndev, "mode %d recv fail, cnt=0x%x, budget=0x%x\n",
+ mode, good_cnt, HNS3_NIC_LB_TEST_PKT_NUM);
+ }
+
+out:
+ hns3_lb_clear_tx_ring(priv, HNS3_NIC_LB_TEST_RING_ID,
+ HNS3_NIC_LB_TEST_RING_ID,
+ HNS3_NIC_LB_TEST_PKT_NUM);
+
+ kfree_skb(skb);
+ return ret_val;
+}
+
+static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2])
+{
+ st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP;
+ st_param[HNAE3_LOOP_APP][1] =
+ h->flags & HNAE3_SUPPORT_APP_LOOPBACK;
+
+ st_param[HNAE3_LOOP_SERIAL_SERDES][0] = HNAE3_LOOP_SERIAL_SERDES;
+ st_param[HNAE3_LOOP_SERIAL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][0] =
+ HNAE3_LOOP_PARALLEL_SERDES;
+ st_param[HNAE3_LOOP_PARALLEL_SERDES][1] =
+ h->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY;
+ st_param[HNAE3_LOOP_PHY][1] =
+ h->flags & HNAE3_SUPPORT_PHY_LOOPBACK;
+}
+
+static void hns3_selftest_prepare(struct net_device *ndev,
+ bool if_running, int (*st_param)[2])
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test start\n");
+
+ hns3_set_selftest_param(h, st_param);
+
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ /* Disable the vlan filter because selftest does not support it */
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, false);
+#endif
+
+ /* Tell firmware to stop mac autoneg before the loopback test starts,
+ * otherwise the loopback test may fail while the port is still
+ * negotiating.
+ */
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, true);
+
+ set_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+}
+
+static void hns3_selftest_restore(struct net_device *ndev, bool if_running)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+
+ clear_bit(HNS3_NIC_STATE_TESTING, &priv->state);
+
+ if (h->ae_algo->ops->halt_autoneg)
+ h->ae_algo->ops->halt_autoneg(h, false);
+
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+ if (h->ae_algo->ops->enable_vlan_filter &&
+ ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ h->ae_algo->ops->enable_vlan_filter(h, true);
+#endif
+
+ if (if_running)
+ ndev->netdev_ops->ndo_open(ndev);
+
+ if (netif_msg_ifdown(h))
+ netdev_info(ndev, "self test end\n");
+}
+
+static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2],
+ struct ethtool_test *eth_test, u64 *data)
+{
+ int test_index = 0;
+ u32 i;
+
+ for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) {
+ enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0];
+
+ if (!st_param[i][1])
+ continue;
+
+ data[test_index] = hns3_lp_up(ndev, loop_type);
+ if (!data[test_index])
+ data[test_index] = hns3_lp_run_test(ndev, loop_type);
+
+ hns3_lp_down(ndev, loop_type);
+
+ if (data[test_index])
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+
+ test_index++;
+ }
+}
+
+/**
+ * hns3_self_test - self test
+ * @ndev: net device
+ * @eth_test: test cmd
+ * @data: test result
+ */
+static void hns3_self_test(struct net_device *ndev,
+ struct ethtool_test *eth_test, u64 *data)
+{
+ int st_param[HNS3_SELF_TEST_TYPE_NUM][2];
+ bool if_running = netif_running(ndev);
+
+ if (hns3_nic_resetting(ndev)) {
+ netdev_err(ndev, "dev resetting!");
+ return;
+ }
+
+ /* Only do offline selftest, or pass by default */
+ if (eth_test->flags != ETH_TEST_FL_OFFLINE)
+ return;
+
+ hns3_selftest_prepare(ndev, if_running, st_param);
+ hns3_do_selftest(ndev, st_param, eth_test, data);
+ hns3_selftest_restore(ndev, if_running);
+}
+
+static int hns3_get_sset_count(struct net_device *netdev, int stringset)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+ if (!ops->get_sset_count)
+ return -EOPNOTSUPP;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ return ((HNS3_TQP_STATS_COUNT * h->kinfo.num_tqps) +
+ ops->get_sset_count(h, stringset));
+
+ case ETH_SS_TEST:
+ return ops->get_sset_count(h, stringset);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void *hns3_update_strings(u8 *data, const struct hns3_stats *stats,
+ u32 stat_count, u32 num_tqps, const char *prefix)
+{
+#define MAX_PREFIX_SIZE (6 + 4)
+ u32 size_left;
+ u32 i, j;
+ u32 n1;
+
+ for (i = 0; i < num_tqps; i++) {
+ for (j = 0; j < stat_count; j++) {
+ data[ETH_GSTRING_LEN - 1] = '\0';
+
+ /* first, prepend the prefix string */
+ n1 = scnprintf(data, MAX_PREFIX_SIZE, "%s%d_",
+ prefix, i);
+ size_left = (ETH_GSTRING_LEN - 1) - n1;
+
+ /* now, concatenate the stats string to it */
+ strncat(data, stats[j].stats_string, size_left);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ return data;
+}
+
+static u8 *hns3_get_strings_tqps(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ const char tx_prefix[] = "txq";
+ const char rx_prefix[] = "rxq";
+
+ /* get strings for Tx */
+ data = hns3_update_strings(data, hns3_txq_stats, HNS3_TXQ_STATS_COUNT,
+ kinfo->num_tqps, tx_prefix);
+
+ /* get strings for Rx */
+ data = hns3_update_strings(data, hns3_rxq_stats, HNS3_RXQ_STATS_COUNT,
+ kinfo->num_tqps, rx_prefix);
+
+ return data;
+}
+
+static void hns3_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+ char *buff = (char *)data;
+
+ if (!ops->get_strings)
+ return;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ buff = hns3_get_strings_tqps(h, buff);
+ ops->get_strings(h, stringset, (u8 *)buff);
+ break;
+ case ETH_SS_TEST:
+ ops->get_strings(h, stringset, data);
+ break;
+ default:
+ break;
+ }
+}
+
+static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
+{
+ struct hns3_nic_priv *nic_priv = (struct hns3_nic_priv *)handle->priv;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hns3_enet_ring *ring;
+ u8 *stat;
+ int i, j;
+
+ /* get stats for Tx */
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ ring = &nic_priv->ring[i];
+ for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
+ *data++ = *(u64 *)stat;
+ }
+ }
+
+ /* get stats for Rx */
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ ring = &nic_priv->ring[i + kinfo->num_tqps];
+ for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
+ stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
+ *data++ = *(u64 *)stat;
+ }
+ }
+
+ return data;
+}
+
+/* hns3_get_stats - get detailed statistics.
+ * @netdev: net device
+ * @stats: statistics info.
+ * @data: statistics data.
+ */
+static void hns3_get_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u64 *p = data;
+
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting, could not get stats\n");
+ return;
+ }
+
+ if (!h->ae_algo->ops->get_stats || !h->ae_algo->ops->update_stats) {
+ netdev_err(netdev, "could not get any statistics\n");
+ return;
+ }
+
+ h->ae_algo->ops->update_stats(h, &netdev->stats);
+
+ /* get per-queue stats */
+ p = hns3_get_stats_tqps(h, p);
+
+ /* get MAC & other misc hardware stats */
+ h->ae_algo->ops->get_stats(h, p);
+}
+
+static void hns3_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u32 fw_version;
+
+ if (!h->ae_algo->ops->get_fw_version) {
+ netdev_err(netdev, "could not get fw version!\n");
+ return;
+ }
+
+ strncpy(drvinfo->driver, h->pdev->driver->name,
+ sizeof(drvinfo->driver));
+ drvinfo->driver[sizeof(drvinfo->driver) - 1] = '\0';
+
+ strncpy(drvinfo->bus_info, pci_name(h->pdev),
+ sizeof(drvinfo->bus_info));
+ drvinfo->bus_info[ETHTOOL_BUSINFO_LEN - 1] = '\0';
+
+ fw_version = priv->ae_handle->ae_algo->ops->get_fw_version(h);
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%lu.%lu.%lu.%lu",
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+}
+
+static u32 hns3_get_link(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_status)
+ return h->ae_algo->ops->get_status(h);
+ else
+ return 0;
+}
+
+static void hns3_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return;
+ }
+
+ param->tx_max_pending = HNS3_RING_MAX_PENDING;
+ param->rx_max_pending = HNS3_RING_MAX_PENDING;
+
+ param->tx_pending = priv->ring[0].desc_num;
+ param->rx_pending = priv->ring[queue_num].desc_num;
+}
+
+static void hns3_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_pauseparam)
+ h->ae_algo->ops->get_pauseparam(h, &param->autoneg,
+ &param->rx_pause, &param->tx_pause);
+}
+
+static int hns3_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *param)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ netif_dbg(h, drv, netdev,
+ "set pauseparam: autoneg=%u, rx:%u, tx:%u\n",
+ param->autoneg, param->rx_pause, param->tx_pause);
+
+ if (h->ae_algo->ops->set_pauseparam)
+ return h->ae_algo->ops->set_pauseparam(h, param->autoneg,
+ param->rx_pause,
+ param->tx_pause);
+ return -EOPNOTSUPP;
+}
+
+static void hns3_get_ksettings(struct hnae3_handle *h,
+ struct ethtool_link_ksettings *cmd)
+{
+ const struct hnae3_ae_ops *ops = h->ae_algo->ops;
+
+ /* 1.auto_neg & speed & duplex from cmd */
+ if (ops->get_ksettings_an_result)
+ ops->get_ksettings_an_result(h,
+ &cmd->base.autoneg,
+ &cmd->base.speed,
+ &cmd->base.duplex);
+
+ /* 2.get link mode */
+ if (ops->get_link_mode)
+ ops->get_link_mode(h,
+ cmd->link_modes.supported,
+ cmd->link_modes.advertising);
+
+ /* 3.mdix_ctrl&mdix get from phy reg */
+ if (ops->get_mdix_mode)
+ ops->get_mdix_mode(h, &cmd->base.eth_tp_mdix_ctrl,
+ &cmd->base.eth_tp_mdix);
+}
+
+static int hns3_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops;
+ u8 module_type;
+ u8 media_type;
+ u8 link_stat;
+
+ ops = h->ae_algo->ops;
+ if (ops->get_media_type)
+ ops->get_media_type(h, &media_type, &module_type);
+ else
+ return -EOPNOTSUPP;
+
+ switch (media_type) {
+ case HNAE3_MEDIA_TYPE_NONE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_FIBER:
+ if (module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ cmd->base.port = PORT_OTHER;
+ else if (module_type == HNAE3_MODULE_TYPE_CR)
+ cmd->base.port = PORT_DA;
+ else
+ cmd->base.port = PORT_FIBRE;
+
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_BACKPLANE:
+ cmd->base.port = PORT_NONE;
+ hns3_get_ksettings(h, cmd);
+ break;
+ case HNAE3_MEDIA_TYPE_COPPER:
+ cmd->base.port = PORT_TP;
+ if (!netdev->phydev)
+ hns3_get_ksettings(h, cmd);
+ else
+ phy_ethtool_ksettings_get(netdev->phydev, cmd);
+ break;
+ default:
+
+ netdev_warn(netdev, "Unknown media type");
+ return 0;
+ }
+
+ /* mdio_support */
+ cmd->base.mdio_support = ETH_MDIO_SUPPORTS_C22;
+
+ link_stat = hns3_get_link(netdev);
+ if (!link_stat) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ }
+
+ return 0;
+}
+
+static int hns3_check_ksettings_param(const struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 module_type = HNAE3_MODULE_TYPE_UNKNOWN;
+ u8 media_type = HNAE3_MEDIA_TYPE_UNKNOWN;
+ u8 autoneg;
+ u32 speed;
+ u8 duplex;
+ int ret;
+
+ /* hw doesn't support using a specified speed and duplex to negotiate,
+ * so it is unnecessary to check them when autoneg is on.
+ */
+ if (cmd->base.autoneg)
+ return 0;
+
+ if (ops->get_ksettings_an_result) {
+ ops->get_ksettings_an_result(handle, &autoneg, &speed, &duplex);
+ if (cmd->base.autoneg == autoneg && cmd->base.speed == speed &&
+ cmd->base.duplex == duplex)
+ return 0;
+ }
+
+ if (ops->get_media_type)
+ ops->get_media_type(handle, &media_type, &module_type);
+
+ if (cmd->base.duplex == DUPLEX_HALF &&
+ media_type != HNAE3_MEDIA_TYPE_COPPER) {
+ netdev_err(netdev,
+ "only copper port supports half duplex!");
+ return -EINVAL;
+ }
+
+ if (ops->check_port_speed) {
+ ret = ops->check_port_speed(handle, cmd->base.speed);
+ if (ret) {
+ netdev_err(netdev, "unsupported speed\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hns3_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ int ret;
+
+ /* The chip doesn't support this mode. */
+ if (cmd->base.speed == SPEED_1000 && cmd->base.duplex == DUPLEX_HALF)
+ return -EINVAL;
+
+ netif_dbg(handle, drv, netdev,
+ "set link(%s): autoneg=%u, speed=%u, duplex=%u\n",
+ netdev->phydev ? "phy" : "mac",
+ cmd->base.autoneg, cmd->base.speed, cmd->base.duplex);
+
+ /* Only support ksettings_set for netdev with phy attached for now */
+ if (netdev->phydev) {
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg == AUTONEG_DISABLE)
+ return -EINVAL;
+
+ return phy_ethtool_ksettings_set(netdev->phydev, cmd);
+ }
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ ret = hns3_check_ksettings_param(netdev, cmd);
+ if (ret)
+ return ret;
+
+ if (ops->set_autoneg) {
+ ret = ops->set_autoneg(handle, cmd->base.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ /* hw doesn't support using a specified speed and duplex to negotiate;
+ * ignore them when autoneg is on.
+ */
+ if (cmd->base.autoneg) {
+ netdev_info(netdev,
+ "autoneg is on, ignore the speed and duplex\n");
+ return 0;
+ }
+
+ if (ops->cfg_mac_speed_dup_h)
+ ret = ops->cfg_mac_speed_dup_h(handle, cmd->base.speed,
+ cmd->base.duplex);
+
+ return ret;
+}
+
+static u32 hns3_get_rss_key_size(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_rss_key_size)
+ return 0;
+
+ return h->ae_algo->ops->get_rss_key_size(h);
+}
+
+static u32 hns3_get_rss_indir_size(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_rss_indir_size)
+ return 0;
+
+ return h->ae_algo->ops->get_rss_indir_size(h);
+}
+
+static int hns3_get_rss(struct net_device *netdev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_rss)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_rss(h, indir, key, hfunc);
+}
+
+static int hns3_set_rss(struct net_device *netdev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev);
+
+ if (!h->ae_algo->ops->set_rss)
+ return -EOPNOTSUPP;
+
+ if ((ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 &&
+ hfunc != ETH_RSS_HASH_TOP) || (hfunc != ETH_RSS_HASH_NO_CHANGE &&
+ hfunc != ETH_RSS_HASH_TOP && hfunc != ETH_RSS_HASH_XOR)) {
+ netdev_err(netdev, "hash func not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!indir) {
+ netdev_err(netdev,
+ "set rss failed for indir is empty\n");
+ return -EOPNOTSUPP;
+ }
+
+ return h->ae_algo->ops->set_rss(h, indir, key, hfunc);
+}
+
+static int hns3_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = h->kinfo.num_tqps;
+ return 0;
+ case ETHTOOL_GRXFH:
+ if (h->ae_algo->ops->get_rss_tuple)
+ return h->ae_algo->ops->get_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (h->ae_algo->ops->get_fd_rule_cnt)
+ return h->ae_algo->ops->get_fd_rule_cnt(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRULE:
+ if (h->ae_algo->ops->get_fd_rule_info)
+ return h->ae_algo->ops->get_fd_rule_info(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_GRXCLSRLALL:
+ if (h->ae_algo->ops->get_fd_all_rules)
+ return h->ae_algo->ops->get_fd_all_rules(h, cmd,
+ rule_locs);
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
+ u32 tx_desc_num, u32 rx_desc_num)
+{
+ struct hnae3_handle *h = priv->ae_handle;
+ int i;
+
+ h->kinfo.num_tx_desc = tx_desc_num;
+ h->kinfo.num_rx_desc = rx_desc_num;
+
+ for (i = 0; i < h->kinfo.num_tqps; i++) {
+ priv->ring[i].desc_num = tx_desc_num;
+ priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
+ }
+}
+
+static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
+{
+ struct hnae3_handle *handle = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ int i;
+
+ tmp_rings = kcalloc(handle->kinfo.num_tqps * 2,
+ sizeof(struct hns3_enet_ring), GFP_KERNEL);
+ if (!tmp_rings)
+ return NULL;
+
+ for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
+ memcpy(&tmp_rings[i], &priv->ring[i],
+ sizeof(struct hns3_enet_ring));
+ tmp_rings[i].skb = NULL;
+ }
+
+ return tmp_rings;
+}
+
+static int hns3_check_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ if (hns3_nic_resetting(ndev))
+ return -EBUSY;
+
+ if (param->rx_mini_pending || param->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (param->tx_pending > HNS3_RING_MAX_PENDING ||
+ param->tx_pending < HNS3_RING_MIN_PENDING ||
+ param->rx_pending > HNS3_RING_MAX_PENDING ||
+ param->rx_pending < HNS3_RING_MIN_PENDING) {
+ netdev_err(ndev, "Queue depth out of range [%d-%d]\n",
+ HNS3_RING_MIN_PENDING, HNS3_RING_MAX_PENDING);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *param)
+{
+ struct hns3_nic_priv *priv = netdev_priv(ndev);
+ struct hnae3_handle *h = priv->ae_handle;
+ struct hns3_enet_ring *tmp_rings;
+ bool if_running = netif_running(ndev);
+ u32 old_tx_desc_num, new_tx_desc_num;
+ u32 old_rx_desc_num, new_rx_desc_num;
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret, i;
+
+ ret = hns3_check_ringparam(ndev, param);
+ if (ret)
+ return ret;
+
+ /* Hardware requires that its descriptors must be a multiple of eight */
+ new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
+ new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
+ old_tx_desc_num = priv->ring[0].desc_num;
+ old_rx_desc_num = priv->ring[queue_num].desc_num;
+ if (old_tx_desc_num == new_tx_desc_num &&
+ old_rx_desc_num == new_rx_desc_num)
+ return 0;
+
+ tmp_rings = hns3_backup_ringparam(priv);
+ if (!tmp_rings) {
+ netdev_err(ndev,
+ "backup ring param failed by allocating memory fail\n");
+ return -ENOMEM;
+ }
+
+ netdev_info(ndev,
+ "Changing Tx/Rx ring depth from %u/%u to %u/%u\n",
+ old_tx_desc_num, old_rx_desc_num,
+ new_tx_desc_num, new_rx_desc_num);
+
+ if (if_running)
+ ndev->netdev_ops->ndo_stop(ndev);
+
+ hns3_change_all_ring_bd_num(priv, new_tx_desc_num, new_rx_desc_num);
+ ret = hns3_init_all_ring(priv);
+ if (ret) {
+ netdev_err(ndev, "Change bd num fail, revert to old value(%d)\n",
+ ret);
+
+ hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
+ old_rx_desc_num);
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ memcpy(&priv->ring[i], &tmp_rings[i],
+ sizeof(struct hns3_enet_ring));
+ } else {
+ for (i = 0; i < h->kinfo.num_tqps * 2; i++)
+ hns3_fini_ring(&tmp_rings[i]);
+ }
+
+ kfree(tmp_rings);
+
+ if (if_running)
+ ret = ndev->netdev_ops->ndo_open(ndev);
+
+ return ret;
+}
+
+static int hns3_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ if (h->ae_algo->ops->set_rss_tuple)
+ return h->ae_algo->ops->set_rss_tuple(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLINS:
+ if (h->ae_algo->ops->add_fd_entry)
+ return h->ae_algo->ops->add_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (h->ae_algo->ops->del_fd_entry)
+ return h->ae_algo->ops->del_fd_entry(h, cmd);
+ return -EOPNOTSUPP;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int hns3_nway_reset(struct net_device *netdev)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct phy_device *phy = netdev->phydev;
+ int autoneg;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hns3_nic_resetting(netdev)) {
+ netdev_err(netdev, "dev resetting!");
+ return -EBUSY;
+ }
+
+ if (!ops->get_autoneg || !ops->restart_autoneg)
+ return -EOPNOTSUPP;
+
+ autoneg = ops->get_autoneg(handle);
+ if (autoneg != AUTONEG_ENABLE) {
+ netdev_err(netdev,
+ "Autoneg is off, don't support to restart it\n");
+ return -EINVAL;
+ }
+
+ netif_dbg(handle, drv, netdev,
+ "nway reset (using %s)\n", phy ? "phy" : "mac");
+
+ if (phy)
+ return genphy_restart_aneg(phy);
+
+ return ops->restart_autoneg(handle);
+}
+
+static void hns3_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->ae_algo->ops->get_channels)
+ h->ae_algo->ops->get_channels(h, ch);
+}
+
+static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
+ struct ethtool_coalesce *cmd)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ u16 queue_num = h->kinfo.num_tqps;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ if (queue >= queue_num) {
+ netdev_err(netdev,
+ "Invalid queue value %u! Queue max id=%u\n",
+ queue, queue_num - 1);
+ return -EINVAL;
+ }
+
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
+
+ cmd->use_adaptive_tx_coalesce =
+ tx_vector->tx_group.coal.gl_adapt_enable;
+ cmd->use_adaptive_rx_coalesce =
+ rx_vector->rx_group.coal.gl_adapt_enable;
+
+ cmd->tx_coalesce_usecs = tx_vector->tx_group.coal.int_gl;
+ cmd->rx_coalesce_usecs = rx_vector->rx_group.coal.int_gl;
+
+ cmd->tx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+ cmd->rx_coalesce_usecs_high = h->kinfo.int_rl_setting;
+
+ return 0;
+}
+
+static int hns3_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ return hns3_get_coalesce_per_queue(netdev, 0, cmd);
+}
+
+static int hns3_check_gl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rx_gl, tx_gl;
+
+ if (cmd->rx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid rx-usecs value, rx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ if (cmd->tx_coalesce_usecs > HNS3_INT_GL_MAX) {
+ netdev_err(netdev,
+ "Invalid tx-usecs value, tx-usecs range is 0-%d\n",
+ HNS3_INT_GL_MAX);
+ return -EINVAL;
+ }
+
+ rx_gl = hns3_gl_round_down(cmd->rx_coalesce_usecs);
+ if (rx_gl != cmd->rx_coalesce_usecs) {
+ netdev_info(netdev,
+ "rx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
+ cmd->rx_coalesce_usecs, rx_gl);
+ }
+
+ tx_gl = hns3_gl_round_down(cmd->tx_coalesce_usecs);
+ if (tx_gl != cmd->tx_coalesce_usecs) {
+ netdev_info(netdev,
+ "tx_usecs(%u) rounded down to %u, because it must be multiple of 2.\n",
+ cmd->tx_coalesce_usecs, tx_gl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_rl_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ u32 rl;
+
+ if (cmd->tx_coalesce_usecs_high != cmd->rx_coalesce_usecs_high) {
+ netdev_err(netdev,
+ "tx_usecs_high must be same as rx_usecs_high.\n");
+ return -EINVAL;
+ }
+
+ if (cmd->rx_coalesce_usecs_high > HNS3_INT_RL_MAX) {
+ netdev_err(netdev,
+ "Invalid usecs_high value, usecs_high range is 0-%d\n",
+ HNS3_INT_RL_MAX);
+ return -EINVAL;
+ }
+
+ rl = hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+ if (rl != cmd->rx_coalesce_usecs_high) {
+ netdev_info(netdev,
+ "usecs_high(%u) rounded down to %u, because it must be multiple of 4.\n",
+ cmd->rx_coalesce_usecs_high, rl);
+ }
+
+ return 0;
+}
+
+static int hns3_check_coalesce_para(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ int ret;
+
+ ret = hns3_check_gl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check gl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hns3_check_rl_coalesce_para(netdev, cmd);
+ if (ret) {
+ netdev_err(netdev,
+ "Check rl coalesce param fail. ret = %d\n", ret);
+ return ret;
+ }
+
+ if (cmd->use_adaptive_tx_coalesce == 1 ||
+ cmd->use_adaptive_rx_coalesce == 1) {
+ netdev_info(netdev,
+ "adaptive-tx=%u and adaptive-rx=%u, tx_usecs or rx_usecs will changed dynamically.\n",
+ cmd->use_adaptive_tx_coalesce,
+ cmd->use_adaptive_rx_coalesce);
+ }
+
+ return 0;
+}
+
+static void hns3_set_coalesce_per_queue(struct net_device *netdev,
+ struct ethtool_coalesce *cmd,
+ u32 queue)
+{
+ struct hns3_enet_tqp_vector *tx_vector, *rx_vector;
+ struct hns3_nic_priv *priv = netdev_priv(netdev);
+ struct hnae3_handle *h = priv->ae_handle;
+ int queue_num = h->kinfo.num_tqps;
+
+ tx_vector = priv->ring[queue].tqp_vector;
+ rx_vector = priv->ring[queue_num + queue].tqp_vector;
+
+ tx_vector->tx_group.coal.gl_adapt_enable =
+ cmd->use_adaptive_tx_coalesce;
+ rx_vector->rx_group.coal.gl_adapt_enable =
+ cmd->use_adaptive_rx_coalesce;
+
+ tx_vector->tx_group.coal.int_gl = cmd->tx_coalesce_usecs;
+ rx_vector->rx_group.coal.int_gl = cmd->rx_coalesce_usecs;
+
+ hns3_set_vector_coalesce_tx_gl(tx_vector,
+ tx_vector->tx_group.coal.int_gl);
+ hns3_set_vector_coalesce_rx_gl(rx_vector,
+ rx_vector->rx_group.coal.int_gl);
+
+ hns3_set_vector_coalesce_rl(tx_vector, h->kinfo.int_rl_setting);
+ hns3_set_vector_coalesce_rl(rx_vector, h->kinfo.int_rl_setting);
+}
+
+static int hns3_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+ u16 queue_num = h->kinfo.num_tqps;
+ int ret;
+ int i;
+
+ if (hns3_nic_resetting(netdev))
+ return -EBUSY;
+
+ ret = hns3_check_coalesce_para(netdev, cmd);
+ if (ret)
+ return ret;
+
+ h->kinfo.int_rl_setting =
+ hns3_rl_round_down(cmd->rx_coalesce_usecs_high);
+
+ for (i = 0; i < queue_num; i++)
+ hns3_set_coalesce_per_queue(netdev, cmd, i);
+
+ return 0;
+}
+
+static int hns3_get_regs_len(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->get_regs_len(h);
+}
+
+static void hns3_get_regs(struct net_device *netdev,
+ struct ethtool_regs *cmd, void *data)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->get_regs)
+ return;
+
+ h->ae_algo->ops->get_regs(h, &cmd->version, data);
+}
+
+static int hns3_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (!h->ae_algo->ops->set_led_id)
+ return -EOPNOTSUPP;
+
+ return h->ae_algo->ops->set_led_id(h, state);
+}
+
+static u32 hns3_get_msglevel(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ return h->msg_enable;
+}
+
+static void hns3_set_msglevel(struct net_device *netdev, u32 msg_level)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ h->msg_enable = msg_level;
+}
+
+/* Translate local fec value into ethtool value. */
+static unsigned int loc_to_eth_fec(u8 loc_fec)
+{
+ u32 eth_fec = 0;
+
+ if (loc_fec & BIT(HNAE3_FEC_AUTO))
+ eth_fec |= ETHTOOL_FEC_AUTO;
+ if (loc_fec & BIT(HNAE3_FEC_RS))
+ eth_fec |= ETHTOOL_FEC_RS;
+ if (loc_fec & BIT(HNAE3_FEC_BASER))
+ eth_fec |= ETHTOOL_FEC_BASER;
+
+ /* if nothing is set, then FEC is off */
+ if (!eth_fec)
+ eth_fec = ETHTOOL_FEC_OFF;
+
+ return eth_fec;
+}
+
+/* Translate ethtool fec value into local value. */
+static unsigned int eth_to_loc_fec(unsigned int eth_fec)
+{
+ u32 loc_fec = 0;
+
+ if (eth_fec & ETHTOOL_FEC_OFF)
+ return loc_fec;
+
+ if (eth_fec & ETHTOOL_FEC_AUTO)
+ loc_fec |= BIT(HNAE3_FEC_AUTO);
+ if (eth_fec & ETHTOOL_FEC_RS)
+ loc_fec |= BIT(HNAE3_FEC_RS);
+ if (eth_fec & ETHTOOL_FEC_BASER)
+ loc_fec |= BIT(HNAE3_FEC_BASER);
+
+ return loc_fec;
+}
+
+static int hns3_get_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u8 fec_ability;
+ u8 fec_mode;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ if (!ops->get_fec)
+ return -EOPNOTSUPP;
+
+ ops->get_fec(handle, &fec_ability, &fec_mode);
+
+ fec->fec = loc_to_eth_fec(fec_ability);
+ fec->active_fec = loc_to_eth_fec(fec_mode);
+
+ return 0;
+}
+
+static int hns3_set_fecparam(struct net_device *netdev,
+ struct ethtool_fecparam *fec)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ u32 fec_mode;
+
+ if (!test_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps))
+ return -EOPNOTSUPP;
+
+ if (!ops->set_fec)
+ return -EOPNOTSUPP;
+ fec_mode = eth_to_loc_fec(fec->fec);
+
+ netif_dbg(handle, drv, netdev, "set fecparam: mode=%u\n", fec_mode);
+
+ return ops->set_fec(handle, fec_mode);
+}
+
+static int hns3_get_module_info(struct net_device *netdev,
+ struct ethtool_modinfo *modinfo)
+{
+#define HNS3_SFF_8636_V1_3 0x03
+
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ struct hns3_sfp_type sfp_type;
+ int ret;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ memset(&sfp_type, 0, sizeof(sfp_type));
+ ret = ops->get_module_eeprom(handle, 0, sizeof(sfp_type) / sizeof(u8),
+ (u8 *)&sfp_type);
+ if (ret)
+ return ret;
+
+ switch (sfp_type.type) {
+ case SFF8024_ID_SFP:
+ modinfo->type = ETH_MODULE_SFF_8472;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
+ break;
+ case SFF8024_ID_QSFP_8438:
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ break;
+ case SFF8024_ID_QSFP_8436_8636:
+ if (sfp_type.ext_type < HNS3_SFF_8636_V1_3) {
+ modinfo->type = ETH_MODULE_SFF_8436;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN;
+ } else {
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ }
+ break;
+ case SFF8024_ID_QSFP28_8636:
+ modinfo->type = ETH_MODULE_SFF_8636;
+ modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN;
+ break;
+ default:
+ netdev_err(netdev, "Optical module unknown: %#x\n",
+ sfp_type.type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hns3_get_module_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *ee, u8 *data)
+{
+ struct hnae3_handle *handle = hns3_get_handle(netdev);
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
+ const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+
+ if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !ops->get_module_eeprom)
+ return -EOPNOTSUPP;
+
+ if (!ee->len)
+ return -EINVAL;
+
+ memset(data, 0, ee->len);
+
+ return ops->get_module_eeprom(handle, ee->offset, ee->len, data);
+}
+
+#define HNS3_ETHTOOL_COALESCE (ETHTOOL_COALESCE_USECS | \
+ ETHTOOL_COALESCE_USE_ADAPTIVE | \
+ ETHTOOL_COALESCE_RX_USECS_HIGH | \
+ ETHTOOL_COALESCE_TX_USECS_HIGH)
+
+static const struct ethtool_ops hns3vf_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .get_link = hns3_get_link,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+};
+
+static const struct ethtool_ops hns3_ethtool_ops = {
+ .supported_coalesce_params = HNS3_ETHTOOL_COALESCE,
+ .self_test = hns3_self_test,
+ .get_drvinfo = hns3_get_drvinfo,
+ .get_link = hns3_get_link,
+ .get_ringparam = hns3_get_ringparam,
+ .set_ringparam = hns3_set_ringparam,
+ .get_pauseparam = hns3_get_pauseparam,
+ .set_pauseparam = hns3_set_pauseparam,
+ .get_strings = hns3_get_strings,
+ .get_ethtool_stats = hns3_get_stats,
+ .get_sset_count = hns3_get_sset_count,
+ .get_rxnfc = hns3_get_rxnfc,
+ .set_rxnfc = hns3_set_rxnfc,
+ .get_rxfh_key_size = hns3_get_rss_key_size,
+ .get_rxfh_indir_size = hns3_get_rss_indir_size,
+ .get_rxfh = hns3_get_rss,
+ .set_rxfh = hns3_set_rss,
+ .get_link_ksettings = hns3_get_link_ksettings,
+ .set_link_ksettings = hns3_set_link_ksettings,
+ .nway_reset = hns3_nway_reset,
+ .get_channels = hns3_get_channels,
+ .set_channels = hns3_set_channels,
+ .get_coalesce = hns3_get_coalesce,
+ .set_coalesce = hns3_set_coalesce,
+ .get_regs_len = hns3_get_regs_len,
+ .get_regs = hns3_get_regs,
+ .set_phys_id = hns3_set_phys_id,
+ .get_msglevel = hns3_get_msglevel,
+ .set_msglevel = hns3_set_msglevel,
+ .get_fecparam = hns3_get_fecparam,
+ .set_fecparam = hns3_set_fecparam,
+ .get_module_info = hns3_get_module_info,
+ .get_module_eeprom = hns3_get_module_eeprom,
+};
+
+void hns3_ethtool_set_ops(struct net_device *netdev)
+{
+ struct hnae3_handle *h = hns3_get_handle(netdev);
+
+ if (h->flags & HNAE3_SUPPORT_VF)
+ netdev->ethtool_ops = &hns3vf_ethtool_ops;
+ else
+ netdev->ethtool_ops = &hns3_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
new file mode 100644
index 000000000..5153e5d41
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_trace.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HNS3_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HNS3_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define DESC_NR (sizeof(struct hns3_desc) / sizeof(u32))
+
+DECLARE_EVENT_CLASS(hns3_skb_template,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, headlen)
+ __field(unsigned int, len)
+ __field(__u8, nr_frags)
+ __field(__u8, ip_summed)
+ __field(unsigned int, hdr_len)
+ __field(unsigned short, gso_size)
+ __field(unsigned short, gso_segs)
+ __field(unsigned int, gso_type)
+ __field(bool, fraglist)
+ __array(__u32, size, MAX_SKB_FRAGS)
+ ),
+
+ TP_fast_assign(
+ __entry->headlen = skb_headlen(skb);
+ __entry->len = skb->len;
+ __entry->nr_frags = skb_shinfo(skb)->nr_frags;
+ __entry->gso_size = skb_shinfo(skb)->gso_size;
+ __entry->gso_segs = skb_shinfo(skb)->gso_segs;
+ __entry->gso_type = skb_shinfo(skb)->gso_type;
+ __entry->hdr_len = skb->encapsulation ?
+ skb_inner_transport_offset(skb) + inner_tcp_hdrlen(skb) :
+ skb_transport_offset(skb) + tcp_hdrlen(skb);
+ __entry->ip_summed = skb->ip_summed;
+ __entry->fraglist = skb_has_frag_list(skb);
+ hns3_shinfo_pack(skb_shinfo(skb), __entry->size);
+ ),
+
+ TP_printk(
+ "len: %u, %u, %u, cs: %u, gso: %u, %u, %x, frag(%d %u): %s",
+ __entry->headlen, __entry->len, __entry->hdr_len,
+ __entry->ip_summed, __entry->gso_size, __entry->gso_segs,
+ __entry->gso_type, __entry->fraglist, __entry->nr_frags,
+ __print_array(__entry->size, MAX_SKB_FRAGS, sizeof(__u32))
+ )
+);
+
+DEFINE_EVENT(hns3_skb_template, hns3_over_max_bd,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_gro,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+DEFINE_EVENT(hns3_skb_template, hns3_tso,
+ TP_PROTO(struct sk_buff *skb),
+ TP_ARGS(skb));
+
+TRACE_EVENT(hns3_tx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring, int cur_ntu),
+ TP_ARGS(ring, cur_ntu),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ memcpy(__entry->desc, &ring->desc[cur_ntu],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hns3_rx_desc,
+ TP_PROTO(struct hns3_enet_ring *ring),
+ TP_ARGS(ring),
+
+ TP_STRUCT__entry(
+ __field(int, index)
+ __field(int, ntu)
+ __field(int, ntc)
+ __field(dma_addr_t, desc_dma)
+ __field(dma_addr_t, buf_dma)
+ __array(u32, desc, DESC_NR)
+ __string(devname, ring->tqp->handle->kinfo.netdev->name)
+ ),
+
+ TP_fast_assign(
+ __entry->index = ring->tqp->tqp_index;
+ __entry->ntu = ring->next_to_use;
+ __entry->ntc = ring->next_to_clean;
+ __entry->desc_dma = ring->desc_dma_addr;
+ __entry->buf_dma = ring->desc_cb[ring->next_to_clean].dma;
+ memcpy(__entry->desc, &ring->desc[ring->next_to_clean],
+ sizeof(struct hns3_desc));
+ __assign_str(devname, ring->tqp->handle->kinfo.netdev->name);
+ ),
+
+ TP_printk(
+ "%s-%d-%d/%d desc(%pad) buf(%pad): %s",
+ __get_str(devname), __entry->index, __entry->ntu,
+ __entry->ntc, &__entry->desc_dma, &__entry->buf_dma,
+ __print_array(__entry->desc, DESC_NR, sizeof(u32))
+ )
+);
+
+#endif /* _HNS3_TRACE_H_ */
+
+/* This must be outside ifdef _HNS3_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hns3_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
new file mode 100644
index 000000000..6c28c8f62
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
+
+obj-$(CONFIG_HNS3_HCLGE) += hclge.o
+hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o
+
+hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
new file mode 100644
index 000000000..6f9f759ce
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c
@@ -0,0 +1,537 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/dma-direction.h>
+#include "hclge_cmd.h"
+#include "hnae3.h"
+#include "hclge_main.h"
+
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int hclge_ring_space(struct hclge_cmq_ring *ring)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+ int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
+
+static int is_valid_csq_clean_head(struct hclge_cmq_ring *ring, int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
+
+static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclge_desc);
+
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
+}
+
+static int hclge_alloc_cmd_queue(struct hclge_dev *hdev, int ring_type)
+{
+ struct hclge_hw *hw = &hdev->hw;
+ struct hclge_cmq_ring *ring =
+ (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
+ int ret;
+
+ ring->ring_type = ring_type;
+ ring->dev = hdev;
+
+ ret = hclge_alloc_cmd_desc(ring);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
+ (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
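+/* Re-initialise the flags of an already-filled descriptor so that it can be
+ * sent again, keeping its opcode and data and setting the read/write
+ * direction.
+ */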
+void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read)
+{
+ desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
+}
+
+void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode, bool is_read)
+{
+ memset((void *)desc, 0, sizeof(struct hclge_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
+}
+
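+/* Program the ring base address, depth and head/tail pointers into the
+ * hardware registers; for the CSQ the "driver ready for reset" bit of the
+ * depth register is preserved.
+ */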
+static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
+{
+ dma_addr_t dma = ring->desc_dma_addr;
+ struct hclge_dev *hdev = ring->dev;
+ struct hclge_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->ring_type == HCLGE_TYPE_CSQ) {
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ reg_val = hclge_read_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGE_NIC_SW_RST_RDY;
+ reg_val |= ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S;
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
+ lower_32_bits(dma));
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
+ upper_32_bits(dma));
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
+ ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclge_cmd_init_regs(struct hclge_hw *hw)
+{
+ hclge_cmd_config_regs(&hw->cmq.csq);
+ hclge_cmd_config_regs(&hw->cmq.crq);
+}
+
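+/* Advance the software next_to_clean pointer up to the head position
+ * reported by hardware and return the number of descriptors cleaned; if the
+ * reported head is invalid, disable the command queue and return -EIO.
+ */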
+static int hclge_cmd_csq_clean(struct hclge_hw *hw)
+{
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ struct hclge_cmq_ring *csq = &hw->cmq.csq;
+ u32 head;
+ int clean;
+
+ head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ dev_warn(&hdev->pdev->dev,
+ "IMP firmware watchdog reset soon expected!\n");
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static int hclge_cmd_csq_done(struct hclge_hw *hw)
+{
+ u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclge_is_special_opcode(u16 opcode)
+{
+ /* These commands span several descriptors and use the first one
+ * to carry the opcode and the return value.
+ */
+ u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT,
+ HCLGE_OPC_STATS_32_BIT,
+ HCLGE_OPC_STATS_MAC,
+ HCLGE_OPC_STATS_MAC_ALL,
+ HCLGE_OPC_QUERY_32_BIT_REG,
+ HCLGE_OPC_QUERY_64_BIT_REG,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+ if (spec_opcode[i] == opcode)
+ return true;
+ }
+
+ return false;
+}
+
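+/* Map the firmware return code carried in the descriptor to a standard
+ * errno value.
+ */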
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+ switch (desc_ret) {
+ case HCLGE_CMD_EXEC_SUCCESS:
+ return 0;
+ case HCLGE_CMD_NO_AUTH:
+ return -EPERM;
+ case HCLGE_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HCLGE_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HCLGE_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HCLGE_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HCLGE_CMD_PARA_ERR:
+ return -EINVAL;
+ case HCLGE_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HCLGE_CMD_TIMEOUT:
+ return -ETIME;
+ case HCLGE_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HCLGE_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HCLGE_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
+
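+/* Copy the descriptors written back by the firmware from the CSQ into the
+ * caller's buffer and convert the return code, which is carried in the last
+ * descriptor for normal commands and in the first one for special
+ * (multi-descriptor) commands.
+ */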
+static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
+ int num, int ntc)
+{
+ u16 opcode, desc_ret;
+ int handle;
+
+ opcode = le16_to_cpu(desc[0].opcode);
+ for (handle = 0; handle < num; handle++) {
+ desc[handle] = hw->cmq.csq.desc[ntc];
+ ntc++;
+ if (ntc >= hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ if (likely(!hclge_is_special_opcode(opcode)))
+ desc_ret = le16_to_cpu(desc[num - 1].retval);
+ else
+ desc_ret = le16_to_cpu(desc[0].retval);
+
+ hw->cmq.last_status = desc_ret;
+
+ return hclge_cmd_convert_err_code(desc_ret);
+}
+
+/**
+ * hclge_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main command-send routine for the command queue: it posts the
+ * descriptors to the CSQ, waits for the firmware write-back for synchronous
+ * commands and then cleans the queue.
+ */
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
+{
+ struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+ struct hclge_cmq_ring *csq = &hw->cmq.csq;
+ struct hclge_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ int retval;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclge_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, SW HEAD and HW HEAD may differ, so the
+ * SW HEAD pointer csq->next_to_clean needs to be updated.
+ */
+ csq->next_to_clean = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of the descriptors in the ring for this send,
+ * which will be used to locate the hardware write-back.
+ */
+ ntc = hw->cmq.csq.next_to_use;
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
+
+ /* If the command is synchronous, wait for the firmware to write back;
+ * if multiple descriptors are sent, use the first one to check.
+ */
+ if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclge_cmd_csq_done(hw)) {
+ complete = true;
+ break;
+ }
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (!complete)
+ retval = -EBADE;
+ else
+ retval = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+ /* Clean the command send queue */
+ handle = hclge_cmd_csq_clean(hw);
+ if (handle < 0)
+ retval = handle;
+ else if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return retval;
+}
+
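+/* Capability bits that are assumed to be present on every device of
+ * version V2 or later, independent of what the firmware reports.
+ */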
+static void hclge_set_default_capability(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclge_parse_capability(struct hclge_dev *hdev,
+ struct hclge_query_version_cmd *cmd)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ u32 caps;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+
+ if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B))
+ set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B))
+ set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B))
+ set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
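+/* Query the firmware version and the device capability bits, and derive the
+ * unified device version from the hardware version and the PCI revision id.
+ */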
+static enum hclge_cmd_status
+hclge_cmd_query_version_and_capability(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_query_version_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
+ resp = (struct hclge_query_version_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ hdev->fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= hdev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclge_set_default_capability(hdev);
+
+ hclge_parse_capability(hdev, resp);
+
+ return ret;
+}
+
+int hclge_cmd_queue_init(struct hclge_dev *hdev)
+{
+ int ret;
+
+ /* Setup the lock for command queue */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
+ /* Set up the number of queue entries for the command queues */
+ hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
+
+ /* Setup Tx write back timeout */
+ hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
+
+ /* Setup queue rings */
+ ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CSQ);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "CSQ ring setup error %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_alloc_cmd_queue(hdev, HCLGE_TYPE_CRQ);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "CRQ ring setup error %d\n", ret);
+ goto err_csq;
+ }
+
+ return 0;
+err_csq:
+ hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
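+/* Tell the firmware which optional reporting features (link events, NCSI
+ * errors) the driver is able to handle.
+ */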
+static int hclge_firmware_compat_config(struct hclge_dev *hdev)
+{
+ struct hclge_firmware_compat_cmd *req;
+ struct hclge_desc desc;
+ u32 compat = 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_COMPAT_CFG, false);
+
+ req = (struct hclge_firmware_compat_cmd *)desc.data;
+
+ hnae3_set_bit(compat, HCLGE_LINK_EVENT_REPORT_EN_B, 1);
+ hnae3_set_bit(compat, HCLGE_NCSI_ERROR_REPORT_EN_B, 1);
+ req->compat = cpu_to_le32(compat);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_cmd_init(struct hclge_dev *hdev)
+{
+ int ret;
+
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
+
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclge_cmd_init_regs(&hdev->hw);
+
+ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ /* Check if there is a new reset pending, because a higher-level
+ * reset may be triggered while a lower-level reset is being processed.
+ */
+ if ((hclge_is_reset_pending(hdev))) {
+ dev_err(&hdev->pdev->dev,
+ "failed to init cmd since reset %#lx pending\n",
+ hdev->reset_pending);
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
+
+ /* get version and device capabilities */
+ ret = hclge_cmd_query_version_and_capability(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query version and capabilities, ret = %d\n",
+ ret);
+ goto err_cmd_init;
+ }
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ /* Ask the firmware to enable some features; the driver can still
+ * work without them.
+ */
+ ret = hclge_firmware_compat_config(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "Firmware compatible features not enabled(%d).\n",
+ ret);
+
+ return 0;
+
+err_cmd_init:
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
+}
+
+static void hclge_cmd_uninit_regs(struct hclge_hw *hw)
+{
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
+ hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclge_cmd_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ /* Wait to ensure that the firmware completes any commands that may
+ * be left over.
+ */
+ msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME);
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
+ hclge_cmd_uninit_regs(&hdev->hw);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ hclge_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclge_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
new file mode 100644
index 000000000..3d70c3a47
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h
@@ -0,0 +1,1144 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_CMD_H
+#define __HCLGE_CMD_H
+#include <linux/types.h>
+#include <linux/io.h>
+#include <linux/etherdevice.h>
+#include "hnae3.h"
+
+#define HCLGE_CMDQ_TX_TIMEOUT 30000
+#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200
+#define HCLGE_DESC_DATA_LEN 6
+
+struct hclge_dev;
+struct hclge_desc {
+ __le16 opcode;
+
+#define HCLGE_CMDQ_RX_INVLD_B 0
+#define HCLGE_CMDQ_RX_OUTVLD_B 1
+
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[HCLGE_DESC_DATA_LEN];
+};
+
+struct hclge_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclge_desc *desc;
+ struct hclge_dev *dev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 ring_type; /* cmq ring type */
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclge_cmd_return_status {
+ HCLGE_CMD_EXEC_SUCCESS = 0,
+ HCLGE_CMD_NO_AUTH = 1,
+ HCLGE_CMD_NOT_SUPPORTED = 2,
+ HCLGE_CMD_QUEUE_FULL = 3,
+ HCLGE_CMD_NEXT_ERR = 4,
+ HCLGE_CMD_UNEXE_ERR = 5,
+ HCLGE_CMD_PARA_ERR = 6,
+ HCLGE_CMD_RESULT_ERR = 7,
+ HCLGE_CMD_TIMEOUT = 8,
+ HCLGE_CMD_HILINK_ERR = 9,
+ HCLGE_CMD_QUEUE_ILLEGAL = 10,
+ HCLGE_CMD_INVALID = 11,
+};
+
+enum hclge_cmd_status {
+ HCLGE_STATUS_SUCCESS = 0,
+ HCLGE_ERR_CSQ_FULL = -1,
+ HCLGE_ERR_CSQ_TIMEOUT = -2,
+ HCLGE_ERR_CSQ_ERROR = -3,
+};
+
+struct hclge_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
+};
+
+struct hclge_cmq {
+ struct hclge_cmq_ring csq;
+ struct hclge_cmq_ring crq;
+ u16 tx_timeout;
+ enum hclge_cmd_status last_status;
+};
+
+#define HCLGE_CMD_FLAG_IN BIT(0)
+#define HCLGE_CMD_FLAG_OUT BIT(1)
+#define HCLGE_CMD_FLAG_NEXT BIT(2)
+#define HCLGE_CMD_FLAG_WR BIT(3)
+#define HCLGE_CMD_FLAG_NO_INTR BIT(4)
+#define HCLGE_CMD_FLAG_ERR_INTR BIT(5)
+
+enum hclge_opcode_type {
+ /* Generic commands */
+ HCLGE_OPC_QUERY_FW_VER = 0x0001,
+ HCLGE_OPC_CFG_RST_TRIGGER = 0x0020,
+ HCLGE_OPC_GBL_RST_STATUS = 0x0021,
+ HCLGE_OPC_QUERY_FUNC_STATUS = 0x0022,
+ HCLGE_OPC_QUERY_PF_RSRC = 0x0023,
+ HCLGE_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGE_OPC_GET_CFG_PARAM = 0x0025,
+ HCLGE_OPC_PF_RST_DONE = 0x0026,
+ HCLGE_OPC_QUERY_VF_RST_RDY = 0x0027,
+
+ HCLGE_OPC_STATS_64_BIT = 0x0030,
+ HCLGE_OPC_STATS_32_BIT = 0x0031,
+ HCLGE_OPC_STATS_MAC = 0x0032,
+ HCLGE_OPC_QUERY_MAC_REG_NUM = 0x0033,
+ HCLGE_OPC_STATS_MAC_ALL = 0x0034,
+
+ HCLGE_OPC_QUERY_REG_NUM = 0x0040,
+ HCLGE_OPC_QUERY_32_BIT_REG = 0x0041,
+ HCLGE_OPC_QUERY_64_BIT_REG = 0x0042,
+ HCLGE_OPC_DFX_BD_NUM = 0x0043,
+ HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044,
+ HCLGE_OPC_DFX_SSU_REG_0 = 0x0045,
+ HCLGE_OPC_DFX_SSU_REG_1 = 0x0046,
+ HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047,
+ HCLGE_OPC_DFX_RPU_REG_0 = 0x0048,
+ HCLGE_OPC_DFX_RPU_REG_1 = 0x0049,
+ HCLGE_OPC_DFX_NCSI_REG = 0x004A,
+ HCLGE_OPC_DFX_RTC_REG = 0x004B,
+ HCLGE_OPC_DFX_PPP_REG = 0x004C,
+ HCLGE_OPC_DFX_RCB_REG = 0x004D,
+ HCLGE_OPC_DFX_TQP_REG = 0x004E,
+ HCLGE_OPC_DFX_SSU_REG_2 = 0x004F,
+
+ HCLGE_OPC_QUERY_DEV_SPECS = 0x0050,
+
+ /* MAC command */
+ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301,
+ HCLGE_OPC_CONFIG_AN_MODE = 0x0304,
+ HCLGE_OPC_QUERY_LINK_STATUS = 0x0307,
+ HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308,
+ HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309,
+ HCLGE_OPC_QUERY_MAC_TNL_INT = 0x0310,
+ HCLGE_OPC_MAC_TNL_INT_EN = 0x0311,
+ HCLGE_OPC_CLEAR_MAC_TNL_INT = 0x0312,
+ HCLGE_OPC_SERDES_LOOPBACK = 0x0315,
+ HCLGE_OPC_CONFIG_FEC_MODE = 0x031A,
+
+ /* PFC/Pause commands */
+ HCLGE_OPC_CFG_MAC_PAUSE_EN = 0x0701,
+ HCLGE_OPC_CFG_PFC_PAUSE_EN = 0x0702,
+ HCLGE_OPC_CFG_MAC_PARA = 0x0703,
+ HCLGE_OPC_CFG_PFC_PARA = 0x0704,
+ HCLGE_OPC_QUERY_MAC_TX_PKT_CNT = 0x0705,
+ HCLGE_OPC_QUERY_MAC_RX_PKT_CNT = 0x0706,
+ HCLGE_OPC_QUERY_PFC_TX_PKT_CNT = 0x0707,
+ HCLGE_OPC_QUERY_PFC_RX_PKT_CNT = 0x0708,
+ HCLGE_OPC_PRI_TO_TC_MAPPING = 0x0709,
+ HCLGE_OPC_QOS_MAP = 0x070A,
+
+ /* ETS/scheduler commands */
+ HCLGE_OPC_TM_PG_TO_PRI_LINK = 0x0804,
+ HCLGE_OPC_TM_QS_TO_PRI_LINK = 0x0805,
+ HCLGE_OPC_TM_NQ_TO_QS_LINK = 0x0806,
+ HCLGE_OPC_TM_RQ_TO_QS_LINK = 0x0807,
+ HCLGE_OPC_TM_PORT_WEIGHT = 0x0808,
+ HCLGE_OPC_TM_PG_WEIGHT = 0x0809,
+ HCLGE_OPC_TM_QS_WEIGHT = 0x080A,
+ HCLGE_OPC_TM_PRI_WEIGHT = 0x080B,
+ HCLGE_OPC_TM_PRI_C_SHAPPING = 0x080C,
+ HCLGE_OPC_TM_PRI_P_SHAPPING = 0x080D,
+ HCLGE_OPC_TM_PG_C_SHAPPING = 0x080E,
+ HCLGE_OPC_TM_PG_P_SHAPPING = 0x080F,
+ HCLGE_OPC_TM_PORT_SHAPPING = 0x0810,
+ HCLGE_OPC_TM_PG_SCH_MODE_CFG = 0x0812,
+ HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813,
+ HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814,
+ HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815,
+ HCLGE_OPC_ETS_TC_WEIGHT = 0x0843,
+ HCLGE_OPC_QSET_DFX_STS = 0x0844,
+ HCLGE_OPC_PRI_DFX_STS = 0x0845,
+ HCLGE_OPC_PG_DFX_STS = 0x0846,
+ HCLGE_OPC_PORT_DFX_STS = 0x0847,
+ HCLGE_OPC_SCH_NQ_CNT = 0x0848,
+ HCLGE_OPC_SCH_RQ_CNT = 0x0849,
+ HCLGE_OPC_TM_INTERNAL_STS = 0x0850,
+ HCLGE_OPC_TM_INTERNAL_CNT = 0x0851,
+ HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852,
+
+ /* Packet buffer allocate commands */
+ HCLGE_OPC_TX_BUFF_ALLOC = 0x0901,
+ HCLGE_OPC_RX_PRIV_BUFF_ALLOC = 0x0902,
+ HCLGE_OPC_RX_PRIV_WL_ALLOC = 0x0903,
+ HCLGE_OPC_RX_COM_THRD_ALLOC = 0x0904,
+ HCLGE_OPC_RX_COM_WL_ALLOC = 0x0905,
+ HCLGE_OPC_RX_GBL_PKT_CNT = 0x0906,
+
+ /* TQP management command */
+ HCLGE_OPC_SET_TQP_MAP = 0x0A01,
+
+ /* TQP commands */
+ HCLGE_OPC_CFG_TX_QUEUE = 0x0B01,
+ HCLGE_OPC_QUERY_TX_POINTER = 0x0B02,
+ HCLGE_OPC_QUERY_TX_STATS = 0x0B03,
+ HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04,
+ HCLGE_OPC_CFG_RX_QUEUE = 0x0B11,
+ HCLGE_OPC_QUERY_RX_POINTER = 0x0B12,
+ HCLGE_OPC_QUERY_RX_STATS = 0x0B13,
+ HCLGE_OPC_STASH_RX_QUEUE_LRO = 0x0B16,
+ HCLGE_OPC_CFG_RX_QUEUE_LRO = 0x0B17,
+ HCLGE_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ HCLGE_OPC_RESET_TQP_QUEUE = 0x0B22,
+
+ /* PPU commands */
+ HCLGE_OPC_PPU_PF_OTHER_INT_DFX = 0x0B4A,
+
+ /* TSO command */
+ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
+ HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+
+ /* RSS commands */
+ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGE_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGE_OPC_RSS_TC_MODE = 0x0D08,
+ HCLGE_OPC_RSS_INPUT_TUPLE = 0x0D02,
+
+ /* Promiscuous mode command */
+ HCLGE_OPC_CFG_PROMISC_MODE = 0x0E01,
+
+ /* Vlan offload commands */
+ HCLGE_OPC_VLAN_PORT_TX_CFG = 0x0F01,
+ HCLGE_OPC_VLAN_PORT_RX_CFG = 0x0F02,
+
+ /* Interrupts commands */
+ HCLGE_OPC_ADD_RING_TO_VECTOR = 0x1503,
+ HCLGE_OPC_DEL_RING_TO_VECTOR = 0x1504,
+
+ /* MAC commands */
+ HCLGE_OPC_MAC_VLAN_ADD = 0x1000,
+ HCLGE_OPC_MAC_VLAN_REMOVE = 0x1001,
+ HCLGE_OPC_MAC_VLAN_TYPE_ID = 0x1002,
+ HCLGE_OPC_MAC_VLAN_INSERT = 0x1003,
+ HCLGE_OPC_MAC_VLAN_ALLOCATE = 0x1004,
+ HCLGE_OPC_MAC_ETHTYPE_ADD = 0x1010,
+ HCLGE_OPC_MAC_ETHTYPE_REMOVE = 0x1011,
+
+ /* MAC VLAN commands */
+ HCLGE_OPC_MAC_VLAN_SWITCH_PARAM = 0x1033,
+
+ /* VLAN commands */
+ HCLGE_OPC_VLAN_FILTER_CTRL = 0x1100,
+ HCLGE_OPC_VLAN_FILTER_PF_CFG = 0x1101,
+ HCLGE_OPC_VLAN_FILTER_VF_CFG = 0x1102,
+
+ /* Flow Director commands */
+ HCLGE_OPC_FD_MODE_CTRL = 0x1200,
+ HCLGE_OPC_FD_GET_ALLOCATION = 0x1201,
+ HCLGE_OPC_FD_KEY_CONFIG = 0x1202,
+ HCLGE_OPC_FD_TCAM_OP = 0x1203,
+ HCLGE_OPC_FD_AD_OP = 0x1204,
+
+ /* MDIO command */
+ HCLGE_OPC_MDIO_CONFIG = 0x1900,
+
+ /* QCN commands */
+ HCLGE_OPC_QCN_MOD_CFG = 0x1A01,
+ HCLGE_OPC_QCN_GRP_TMPLT_CFG = 0x1A02,
+ HCLGE_OPC_QCN_SHAPPING_CFG = 0x1A03,
+ HCLGE_OPC_QCN_SHAPPING_BS_CFG = 0x1A04,
+ HCLGE_OPC_QCN_QSET_LINK_CFG = 0x1A05,
+ HCLGE_OPC_QCN_RP_STATUS_GET = 0x1A06,
+ HCLGE_OPC_QCN_AJUST_INIT = 0x1A07,
+ HCLGE_OPC_QCN_DFX_CNT_STATUS = 0x1A08,
+
+ /* Mailbox command */
+ HCLGEVF_OPC_MBX_PF_TO_VF = 0x2000,
+
+ /* Led command */
+ HCLGE_OPC_LED_STATUS_CFG = 0xB000,
+
+ /* clear hardware resource command */
+ HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B,
+
+ /* NCL config command */
+ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011,
+
+ /* M7 stats command */
+ HCLGE_OPC_M7_STATS_BD = 0x7012,
+ HCLGE_OPC_M7_STATS_INFO = 0x7013,
+ HCLGE_OPC_M7_COMPAT_CFG = 0x701A,
+
+ /* SFP command */
+ HCLGE_OPC_GET_SFP_EEPROM = 0x7100,
+ HCLGE_OPC_GET_SFP_EXIST = 0x7101,
+ HCLGE_OPC_GET_SFP_INFO = 0x7104,
+
+ /* Error INT commands */
+ HCLGE_MAC_COMMON_INT_EN = 0x030E,
+ HCLGE_TM_SCH_ECC_INT_EN = 0x0829,
+ HCLGE_SSU_ECC_INT_CMD = 0x0989,
+ HCLGE_SSU_COMMON_INT_CMD = 0x098C,
+ HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40,
+ HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41,
+ HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42,
+ HCLGE_COMMON_ECC_INT_CFG = 0x1505,
+ HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510,
+ HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511,
+ HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512,
+ HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513,
+ HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514,
+ HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515,
+ HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580,
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581,
+ HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584,
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD = 0x1585,
+ HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD = 0x1586,
+ HCLGE_IGU_EGU_TNL_INT_EN = 0x1803,
+ HCLGE_IGU_COMMON_INT_EN = 0x1806,
+ HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14,
+ HCLGE_PPP_CMD0_INT_CMD = 0x2100,
+ HCLGE_PPP_CMD1_INT_CMD = 0x2101,
+ HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105,
+ HCLGE_NCSI_INT_EN = 0x2401,
+};
+
+#define HCLGE_TQP_REG_OFFSET 0x80000
+#define HCLGE_TQP_REG_SIZE 0x200
+
+#define HCLGE_RCB_INIT_QUERY_TIMEOUT 10
+#define HCLGE_RCB_INIT_FLAG_EN_B 0
+#define HCLGE_RCB_INIT_FLAG_FINI_B 8
+struct hclge_config_rcb_init_cmd {
+ __le16 rcb_init_flag;
+ u8 rsv[22];
+};
+
+struct hclge_tqp_map_cmd {
+ __le16 tqp_id; /* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGE_TQP_MAP_TYPE_PF 0
+#define HCLGE_TQP_MAP_TYPE_VF 1
+#define HCLGE_TQP_MAP_TYPE_B 0
+#define HCLGE_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGE_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclge_int_type {
+ HCLGE_INT_TX,
+ HCLGE_INT_RX,
+ HCLGE_INT_EVENT,
+};
+
+struct hclge_ctrl_vector_chain_cmd {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGE_INT_TYPE_S 0
+#define HCLGE_INT_TYPE_M GENMASK(1, 0)
+#define HCLGE_TQP_ID_S 2
+#define HCLGE_TQP_ID_M GENMASK(12, 2)
+#define HCLGE_INT_GL_IDX_S 13
+#define HCLGE_INT_GL_IDX_M GENMASK(14, 13)
+ __le16 tqp_type_and_id[HCLGE_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 rsv;
+};
+
+#define HCLGE_MAX_TC_NUM 8
+#define HCLGE_TC0_PRI_BUF_EN_B 15 /* Bit 15 indicates enable or not */
+#define HCLGE_BUF_UNIT_S 7 /* Buf size is in units of 128 bytes */
+struct hclge_tx_buff_alloc_cmd {
+ __le16 tx_pkt_buff[HCLGE_MAX_TC_NUM];
+ u8 tx_buff_rsv[8];
+};
+
+struct hclge_rx_priv_buff_cmd {
+ __le16 buf_num[HCLGE_MAX_TC_NUM];
+ __le16 shared_buf;
+ u8 rsv[6];
+};
+
+enum HCLGE_CAP_BITS {
+ HCLGE_CAP_UDP_GSO_B,
+ HCLGE_CAP_QB_B,
+ HCLGE_CAP_FD_FORWARD_TC_B,
+ HCLGE_CAP_PTP_B,
+ HCLGE_CAP_INT_QL_B,
+ HCLGE_CAP_SIMPLE_BD_B,
+ HCLGE_CAP_TX_PUSH_B,
+ HCLGE_CAP_PHY_IMP_B,
+ HCLGE_CAP_TQP_TXRX_INDEP_B,
+ HCLGE_CAP_HW_PAD_B,
+ HCLGE_CAP_STASH_B,
+};
+
+#define HCLGE_QUERY_CAP_LENGTH 3
+struct hclge_query_version_cmd {
+ __le32 firmware;
+ __le32 hardware;
+ __le32 rsv;
+ __le32 caps[HCLGE_QUERY_CAP_LENGTH]; /* capabilities of device */
+};
+
+#define HCLGE_RX_PRIV_EN_B 15
+#define HCLGE_TC_NUM_ONE_DESC 4
+struct hclge_priv_wl {
+ __le16 high;
+ __le16 low;
+};
+
+struct hclge_rx_priv_wl_buf {
+ struct hclge_priv_wl tc_wl[HCLGE_TC_NUM_ONE_DESC];
+};
+
+struct hclge_rx_com_thrd {
+ struct hclge_priv_wl com_thrd[HCLGE_TC_NUM_ONE_DESC];
+};
+
+struct hclge_rx_com_wl {
+ struct hclge_priv_wl com_wl;
+};
+
+struct hclge_waterline {
+ u32 low;
+ u32 high;
+};
+
+struct hclge_tc_thrd {
+ u32 low;
+ u32 high;
+};
+
+struct hclge_priv_buf {
+ struct hclge_waterline wl; /* Waterline for low and high */
+ u32 buf_size; /* TC private buffer size */
+ u32 tx_buf_size;
+ u32 enable; /* Enable TC private buffer or not */
+};
+
+struct hclge_shared_buf {
+ struct hclge_waterline self;
+ struct hclge_tc_thrd tc_thrd[HCLGE_MAX_TC_NUM];
+ u32 buf_size;
+};
+
+struct hclge_pkt_buf_alloc {
+ struct hclge_priv_buf priv_buf[HCLGE_MAX_TC_NUM];
+ struct hclge_shared_buf s_buf;
+};
+
+#define HCLGE_RX_COM_WL_EN_B 15
+struct hclge_rx_com_wl_buf_cmd {
+ __le16 high_wl;
+ __le16 low_wl;
+ u8 rsv[20];
+};
+
+#define HCLGE_RX_PKT_EN_B 15
+struct hclge_rx_pkt_buf_cmd {
+ __le16 high_pkt;
+ __le16 low_pkt;
+ u8 rsv[20];
+};
+
+#define HCLGE_PF_STATE_DONE_B 0
+#define HCLGE_PF_STATE_MAIN_B 1
+#define HCLGE_PF_STATE_BOND_B 2
+#define HCLGE_PF_STATE_MAC_N_B 6
+#define HCLGE_PF_MAC_NUM_MASK 0x3
+#define HCLGE_PF_STATE_MAIN BIT(HCLGE_PF_STATE_MAIN_B)
+#define HCLGE_PF_STATE_DONE BIT(HCLGE_PF_STATE_DONE_B)
+#define HCLGE_VF_RST_STATUS_CMD 4
+
+struct hclge_func_status_cmd {
+ __le32 vf_rst_state[HCLGE_VF_RST_STATUS_CMD];
+ u8 pf_state;
+ u8 mac_id;
+ u8 rsv1;
+ u8 pf_cnt_in_mac;
+ u8 pf_num;
+ u8 vf_num;
+ u8 rsv[2];
+};
+
+struct hclge_pf_res_cmd {
+ __le16 tqp_num;
+ __le16 buf_size;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+#define HCLGE_MSIX_OFT_ROCEE_S 0
+#define HCLGE_MSIX_OFT_ROCEE_M GENMASK(15, 0)
+#define HCLGE_PF_VEC_NUM_S 0
+#define HCLGE_PF_VEC_NUM_M GENMASK(7, 0)
+ __le16 pf_intr_vector_number;
+ __le16 pf_own_fun_number;
+ __le16 tx_buf_size;
+ __le16 dv_buf_size;
+ __le32 rsv[2];
+};
+
+#define HCLGE_CFG_OFFSET_S 0
+#define HCLGE_CFG_OFFSET_M GENMASK(19, 0)
+#define HCLGE_CFG_RD_LEN_S 24
+#define HCLGE_CFG_RD_LEN_M GENMASK(27, 24)
+#define HCLGE_CFG_RD_LEN_BYTES 16
+#define HCLGE_CFG_RD_LEN_UNIT 4
+
+#define HCLGE_CFG_VMDQ_S 0
+#define HCLGE_CFG_VMDQ_M GENMASK(7, 0)
+#define HCLGE_CFG_TC_NUM_S 8
+#define HCLGE_CFG_TC_NUM_M GENMASK(15, 8)
+#define HCLGE_CFG_TQP_DESC_N_S 16
+#define HCLGE_CFG_TQP_DESC_N_M GENMASK(31, 16)
+#define HCLGE_CFG_PHY_ADDR_S 0
+#define HCLGE_CFG_PHY_ADDR_M GENMASK(7, 0)
+#define HCLGE_CFG_MEDIA_TP_S 8
+#define HCLGE_CFG_MEDIA_TP_M GENMASK(15, 8)
+#define HCLGE_CFG_RX_BUF_LEN_S 16
+#define HCLGE_CFG_RX_BUF_LEN_M GENMASK(31, 16)
+#define HCLGE_CFG_MAC_ADDR_H_S 0
+#define HCLGE_CFG_MAC_ADDR_H_M GENMASK(15, 0)
+#define HCLGE_CFG_DEFAULT_SPEED_S 16
+#define HCLGE_CFG_DEFAULT_SPEED_M GENMASK(23, 16)
+#define HCLGE_CFG_RSS_SIZE_S 24
+#define HCLGE_CFG_RSS_SIZE_M GENMASK(31, 24)
+#define HCLGE_CFG_SPEED_ABILITY_S 0
+#define HCLGE_CFG_SPEED_ABILITY_M GENMASK(7, 0)
+#define HCLGE_CFG_SPEED_ABILITY_EXT_S 10
+#define HCLGE_CFG_SPEED_ABILITY_EXT_M GENMASK(15, 10)
+#define HCLGE_CFG_UMV_TBL_SPACE_S 16
+#define HCLGE_CFG_UMV_TBL_SPACE_M GENMASK(31, 16)
+
+#define HCLGE_CFG_CMD_CNT 4
+
+struct hclge_cfg_param_cmd {
+ __le32 offset;
+ __le32 rsv;
+ __le32 param[HCLGE_CFG_CMD_CNT];
+};
+
+#define HCLGE_MAC_MODE 0x0
+#define HCLGE_DESC_NUM 0x40
+
+#define HCLGE_ALLOC_VALID_B 0
+struct hclge_vf_num_cmd {
+ u8 alloc_valid;
+ u8 rsv[23];
+};
+
+#define HCLGE_RSS_DEFAULT_OUTPORT_B 4
+#define HCLGE_RSS_HASH_KEY_OFFSET_B 4
+#define HCLGE_RSS_HASH_KEY_NUM 16
+struct hclge_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGE_RSS_HASH_KEY_NUM];
+};
+
+struct hclge_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+#define HCLGE_RSS_CFG_TBL_SIZE 16
+
+struct hclge_rss_indirection_table_cmd {
+ __le16 start_table_index;
+ __le16 rss_set_bitmap;
+ u8 rsv[4];
+ u8 rss_result[HCLGE_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGE_RSS_TC_OFFSET_S 0
+#define HCLGE_RSS_TC_OFFSET_M GENMASK(9, 0)
+#define HCLGE_RSS_TC_SIZE_S 12
+#define HCLGE_RSS_TC_SIZE_M GENMASK(14, 12)
+#define HCLGE_RSS_TC_VALID_B 15
+struct hclge_rss_tc_mode_cmd {
+ __le16 rss_tc_mode[HCLGE_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+#define HCLGE_LINK_STATUS_UP_B 0
+#define HCLGE_LINK_STATUS_UP_M BIT(HCLGE_LINK_STATUS_UP_B)
+struct hclge_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+struct hclge_promisc_param {
+ u8 vf_id;
+ u8 enable;
+};
+
+#define HCLGE_PROMISC_TX_EN_B BIT(4)
+#define HCLGE_PROMISC_RX_EN_B BIT(5)
+#define HCLGE_PROMISC_EN_B 1
+#define HCLGE_PROMISC_EN_ALL 0x7
+#define HCLGE_PROMISC_EN_UC 0x1
+#define HCLGE_PROMISC_EN_MC 0x2
+#define HCLGE_PROMISC_EN_BC 0x4
+struct hclge_promisc_cfg_cmd {
+ u8 flag;
+ u8 vf_id;
+ __le16 rsv0;
+ u8 rsv1[20];
+};
+
+enum hclge_promisc_type {
+ HCLGE_UNICAST = 1,
+ HCLGE_MULTICAST = 2,
+ HCLGE_BROADCAST = 3,
+};
+
+#define HCLGE_MAC_TX_EN_B 6
+#define HCLGE_MAC_RX_EN_B 7
+#define HCLGE_MAC_PAD_TX_B 11
+#define HCLGE_MAC_PAD_RX_B 12
+#define HCLGE_MAC_1588_TX_B 13
+#define HCLGE_MAC_1588_RX_B 14
+#define HCLGE_MAC_APP_LP_B 15
+#define HCLGE_MAC_LINE_LP_B 16
+#define HCLGE_MAC_FCS_TX_B 17
+#define HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B 18
+#define HCLGE_MAC_RX_FCS_STRIP_B 19
+#define HCLGE_MAC_RX_FCS_B 20
+#define HCLGE_MAC_TX_UNDER_MIN_ERR_B 21
+#define HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B 22
+
+struct hclge_config_mac_mode_cmd {
+ __le32 txrx_pad_fcs_loop_en;
+ u8 rsv[20];
+};
+
+struct hclge_pf_rst_sync_cmd {
+#define HCLGE_PF_RST_ALL_VF_RDY_B 0
+ u8 all_vf_ready;
+ u8 rsv[23];
+};
+
+#define HCLGE_CFG_SPEED_S 0
+#define HCLGE_CFG_SPEED_M GENMASK(5, 0)
+
+#define HCLGE_CFG_DUPLEX_B 7
+#define HCLGE_CFG_DUPLEX_M BIT(HCLGE_CFG_DUPLEX_B)
+
+struct hclge_config_mac_speed_dup_cmd {
+ u8 speed_dup;
+
+#define HCLGE_CFG_MAC_SPEED_CHANGE_EN_B 0
+ u8 mac_change_fec_en;
+ u8 rsv[22];
+};
+
+#define HCLGE_RING_ID_MASK GENMASK(9, 0)
+#define HCLGE_TQP_ENABLE_B 0
+
+#define HCLGE_MAC_CFG_AN_EN_B 0
+#define HCLGE_MAC_CFG_AN_INT_EN_B 1
+#define HCLGE_MAC_CFG_AN_INT_MSK_B 2
+#define HCLGE_MAC_CFG_AN_INT_CLR_B 3
+#define HCLGE_MAC_CFG_AN_RST_B 4
+
+#define HCLGE_MAC_CFG_AN_EN BIT(HCLGE_MAC_CFG_AN_EN_B)
+
+struct hclge_config_auto_neg_cmd {
+ __le32 cfg_an_cmd_flag;
+ u8 rsv[20];
+};
+
+struct hclge_sfp_info_cmd {
+ __le32 speed;
+ u8 query_type; /* 0: sfp speed, 1: active speed */
+ u8 active_fec;
+ u8 autoneg; /* autoneg state */
+ u8 autoneg_ability; /* whether autoneg is supported */
+ __le32 speed_ability; /* speed ability for current media */
+ __le32 module_type;
+ u8 rsv[8];
+};
+
+#define HCLGE_MAC_CFG_FEC_AUTO_EN_B 0
+#define HCLGE_MAC_CFG_FEC_MODE_S 1
+#define HCLGE_MAC_CFG_FEC_MODE_M GENMASK(3, 1)
+#define HCLGE_MAC_CFG_FEC_SET_DEF_B 0
+#define HCLGE_MAC_CFG_FEC_CLR_DEF_B 1
+
+#define HCLGE_MAC_FEC_OFF 0
+#define HCLGE_MAC_FEC_BASER 1
+#define HCLGE_MAC_FEC_RS 2
+struct hclge_config_fec_cmd {
+ u8 fec_mode;
+ u8 default_config;
+ u8 rsv[22];
+};
+
+#define HCLGE_MAC_UPLINK_PORT 0x100
+
+struct hclge_config_max_frm_size_cmd {
+ __le16 max_frm_size;
+ u8 min_frm_size;
+ u8 rsv[21];
+};
+
+enum hclge_mac_vlan_tbl_opcode {
+ HCLGE_MAC_VLAN_ADD, /* Add new or modify mac_vlan */
+ HCLGE_MAC_VLAN_UPDATE, /* Modify other fields of this table */
+ HCLGE_MAC_VLAN_REMOVE, /* Remove an entry through the mac_vlan key */
+ HCLGE_MAC_VLAN_LKUP, /* Look up an entry through the mac_vlan key */
+};
+
+enum hclge_mac_vlan_add_resp_code {
+ HCLGE_ADD_UC_OVERFLOW = 2, /* ADD failed for UC overflow */
+ HCLGE_ADD_MC_OVERFLOW, /* ADD failed for MC overflow */
+};
+
+#define HCLGE_MAC_VLAN_BIT0_EN_B 0
+#define HCLGE_MAC_VLAN_BIT1_EN_B 1
+#define HCLGE_MAC_EPORT_SW_EN_B 12
+#define HCLGE_MAC_EPORT_TYPE_B 11
+#define HCLGE_MAC_EPORT_VFID_S 3
+#define HCLGE_MAC_EPORT_VFID_M GENMASK(10, 3)
+#define HCLGE_MAC_EPORT_PFID_S 0
+#define HCLGE_MAC_EPORT_PFID_M GENMASK(2, 0)
+struct hclge_mac_vlan_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ __le32 mac_addr_hi32;
+ __le16 mac_addr_lo16;
+ __le16 rsv1;
+ u8 entry_type;
+ u8 mc_mac_en;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 rsv2[6];
+};
+
+#define HCLGE_UMV_SPC_ALC_B 0
+struct hclge_umv_spc_alc_cmd {
+ u8 allocate;
+ u8 rsv1[3];
+ __le32 space_size;
+ u8 rsv2[16];
+};
+
+#define HCLGE_MAC_MGR_MASK_VLAN_B BIT(0)
+#define HCLGE_MAC_MGR_MASK_MAC_B BIT(1)
+#define HCLGE_MAC_MGR_MASK_ETHERTYPE_B BIT(2)
+
+struct hclge_mac_mgr_tbl_entry_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ u8 mac_addr[ETH_ALEN];
+ __le16 rsv1;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ u8 sw_port_id_aware;
+ u8 rsv2;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rsv3[2];
+};
+
+struct hclge_vlan_filter_ctrl_cmd {
+ u8 vlan_type;
+ u8 vlan_fe;
+ u8 rsv1[2];
+ u8 vf_id;
+ u8 rsv2[19];
+};
+
+#define HCLGE_VLAN_ID_OFFSET_STEP 160
+#define HCLGE_VLAN_BYTE_SIZE 8
+#define HCLGE_VLAN_OFFSET_BITMAP \
+ (HCLGE_VLAN_ID_OFFSET_STEP / HCLGE_VLAN_BYTE_SIZE)
+
+struct hclge_vlan_filter_pf_cfg_cmd {
+ u8 vlan_offset;
+ u8 vlan_cfg;
+ u8 rsv[2];
+ u8 vlan_offset_bitmap[HCLGE_VLAN_OFFSET_BITMAP];
+};
+
+#define HCLGE_MAX_VF_BYTES 16
+
+struct hclge_vlan_filter_vf_cfg_cmd {
+ __le16 vlan_id;
+ u8 resp_code;
+ u8 rsv;
+ u8 vlan_cfg;
+ u8 rsv1[3];
+ u8 vf_bitmap[HCLGE_MAX_VF_BYTES];
+};
+
+#define HCLGE_SWITCH_ANTI_SPOOF_B 0U
+#define HCLGE_SWITCH_ALW_LPBK_B 1U
+#define HCLGE_SWITCH_ALW_LCL_LPBK_B 2U
+#define HCLGE_SWITCH_ALW_DST_OVRD_B 3U
+#define HCLGE_SWITCH_NO_MASK 0x0
+#define HCLGE_SWITCH_ANTI_SPOOF_MASK 0xFE
+#define HCLGE_SWITCH_ALW_LPBK_MASK 0xFD
+#define HCLGE_SWITCH_ALW_LCL_LPBK_MASK 0xFB
+#define HCLGE_SWITCH_LW_DST_OVRD_MASK 0xF7
+
+struct hclge_mac_vlan_switch_cmd {
+ u8 roce_sel;
+ u8 rsv1[3];
+ __le32 func_id;
+ u8 switch_param;
+ u8 rsv2[3];
+ u8 param_mask;
+ u8 rsv3[11];
+};
+
+enum hclge_mac_vlan_cfg_sel {
+ HCLGE_MAC_VLAN_NIC_SEL = 0,
+ HCLGE_MAC_VLAN_ROCE_SEL,
+};
+
+#define HCLGE_ACCEPT_TAG1_B 0
+#define HCLGE_ACCEPT_UNTAG1_B 1
+#define HCLGE_PORT_INS_TAG1_EN_B 2
+#define HCLGE_PORT_INS_TAG2_EN_B 3
+#define HCLGE_CFG_NIC_ROCE_SEL_B 4
+#define HCLGE_ACCEPT_TAG2_B 5
+#define HCLGE_ACCEPT_UNTAG2_B 6
+#define HCLGE_VF_NUM_PER_BYTE 8
+
+struct hclge_vport_vtag_tx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[2];
+ __le16 def_vlan_tag1;
+ __le16 def_vlan_tag2;
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
+ u8 rsv2[8];
+};
+
+#define HCLGE_REM_TAG1_EN_B 0
+#define HCLGE_REM_TAG2_EN_B 1
+#define HCLGE_SHOW_TAG1_EN_B 2
+#define HCLGE_SHOW_TAG2_EN_B 3
+struct hclge_vport_vtag_rx_cfg_cmd {
+ u8 vport_vlan_cfg;
+ u8 vf_offset;
+ u8 rsv1[6];
+ u8 vf_bitmap[HCLGE_VF_NUM_PER_BYTE];
+ u8 rsv2[8];
+};
+
+struct hclge_tx_vlan_type_cfg_cmd {
+ __le16 ot_vlan_type;
+ __le16 in_vlan_type;
+ u8 rsv[20];
+};
+
+struct hclge_rx_vlan_type_cfg_cmd {
+ __le16 ot_fst_vlan_type;
+ __le16 ot_sec_vlan_type;
+ __le16 in_fst_vlan_type;
+ __le16 in_sec_vlan_type;
+ u8 rsv[16];
+};
+
+struct hclge_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclge_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
+#pragma pack(1)
+struct hclge_mac_ethertype_idx_rd_cmd {
+ u8 flags;
+ u8 resp_code;
+ __le16 vlan_tag;
+ u8 mac_addr[ETH_ALEN];
+ __le16 index;
+ __le16 ethter_type;
+ __le16 egress_port;
+ __le16 egress_queue;
+ __le16 rev0;
+ u8 i_port_bitmap;
+ u8 i_port_direction;
+ u8 rev1[2];
+};
+
+#pragma pack()
+
+#define HCLGE_TSO_MSS_MIN_S 0
+#define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0)
+
+#define HCLGE_TSO_MSS_MAX_S 16
+#define HCLGE_TSO_MSS_MAX_M GENMASK(29, 16)
+
+struct hclge_cfg_tso_status_cmd {
+ __le16 tso_mss_min;
+ __le16 tso_mss_max;
+ u8 rsv[20];
+};
+
+#define HCLGE_GRO_EN_B 0
+struct hclge_cfg_gro_status_cmd {
+ u8 gro_en;
+ u8 rsv[23];
+};
+
+#define HCLGE_TSO_MSS_MIN 256
+#define HCLGE_TSO_MSS_MAX 9668
+
+#define HCLGE_TQP_RESET_B 0
+struct hclge_reset_tqp_queue_cmd {
+ __le16 tqp_id;
+ u8 reset_req;
+ u8 ready_to_reset;
+ u8 rsv[20];
+};
+
+#define HCLGE_CFG_RESET_MAC_B 3
+#define HCLGE_CFG_RESET_FUNC_B 7
+struct hclge_reset_cmd {
+ u8 mac_func_reset;
+ u8 fun_reset_vfid;
+ u8 rsv[22];
+};
+
+#define HCLGE_PF_RESET_DONE_BIT BIT(0)
+
+struct hclge_pf_rst_done_cmd {
+ u8 pf_rst_done;
+ u8 rsv[23];
+};
+
+#define HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B BIT(0)
+#define HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B BIT(2)
+#define HCLGE_CMD_SERDES_DONE_B BIT(0)
+#define HCLGE_CMD_SERDES_SUCCESS_B BIT(1)
+struct hclge_serdes_lb_cmd {
+ u8 mask;
+ u8 enable;
+ u8 result;
+ u8 rsv[21];
+};
+
+#define HCLGE_DEFAULT_TX_BUF 0x4000 /* 16k bytes */
+#define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */
+#define HCLGE_DEFAULT_DV 0xA000 /* 40k bytes */
+#define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30k bytes */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x1400 /* 5120 bytes */
+
+#define HCLGE_TYPE_CRQ 0
+#define HCLGE_TYPE_CSQ 1
+#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGE_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGE_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c
+#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGE_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGE_NIC_CRQ_HEAD_REG 0x27028
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGE_NIC_SW_RST_RDY_B 16
+#define HCLGE_NIC_SW_RST_RDY BIT(HCLGE_NIC_SW_RST_RDY_B)
+
+#define HCLGE_NIC_CMQ_DESC_NUM 1024
+#define HCLGE_NIC_CMQ_DESC_NUM_S 3
+
+#define HCLGE_LED_LOCATE_STATE_S 0
+#define HCLGE_LED_LOCATE_STATE_M GENMASK(1, 0)
+
+struct hclge_set_led_state_cmd {
+ u8 rsv1[3];
+ u8 locate_led_config;
+ u8 rsv2[20];
+};
+
+struct hclge_get_fd_mode_cmd {
+ u8 mode;
+ u8 enable;
+ u8 rsv[22];
+};
+
+struct hclge_get_fd_allocation_cmd {
+ __le32 stage1_entry_num;
+ __le32 stage2_entry_num;
+ __le16 stage1_counter_num;
+ __le16 stage2_counter_num;
+ u8 rsv[12];
+};
+
+struct hclge_set_fd_key_config_cmd {
+ u8 stage;
+ u8 key_select;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u8 rsv1[2];
+ __le32 tuple_mask;
+ __le32 meta_data_mask;
+ u8 rsv2[8];
+};
+
+#define HCLGE_FD_EPORT_SW_EN_B 0
+struct hclge_fd_tcam_config_1_cmd {
+ u8 stage;
+ u8 xy_sel;
+ u8 port_info;
+ u8 rsv1[1];
+ __le32 index;
+ u8 entry_vld;
+ u8 rsv2[7];
+ u8 tcam_data[8];
+};
+
+struct hclge_fd_tcam_config_2_cmd {
+ u8 tcam_data[24];
+};
+
+struct hclge_fd_tcam_config_3_cmd {
+ u8 tcam_data[20];
+ u8 rsv[4];
+};
+
+#define HCLGE_FD_AD_DROP_B 0
+#define HCLGE_FD_AD_DIRECT_QID_B 1
+#define HCLGE_FD_AD_QID_S 2
+#define HCLGE_FD_AD_QID_M GENMASK(11, 2)
+#define HCLGE_FD_AD_USE_COUNTER_B 12
+#define HCLGE_FD_AD_COUNTER_NUM_S 13
+#define HCLGE_FD_AD_COUNTER_NUM_M GENMASK(20, 13)
+#define HCLGE_FD_AD_NXT_STEP_B 20
+#define HCLGE_FD_AD_NXT_KEY_S 21
+#define HCLGE_FD_AD_NXT_KEY_M GENMASK(25, 21)
+#define HCLGE_FD_AD_WR_RULE_ID_B 0
+#define HCLGE_FD_AD_RULE_ID_S 1
+#define HCLGE_FD_AD_RULE_ID_M GENMASK(12, 1)
+
+struct hclge_fd_ad_config_cmd {
+ u8 stage;
+ u8 rsv1[3];
+ __le32 index;
+ __le64 ad_data;
+ u8 rsv2[8];
+};
+
+struct hclge_get_m7_bd_cmd {
+ __le32 bd_num;
+ u8 rsv[20];
+};
+
+struct hclge_query_ppu_pf_other_int_dfx_cmd {
+ __le16 over_8bd_no_fe_qid;
+ __le16 over_8bd_no_fe_vf_id;
+ __le16 tso_mss_cmp_min_err_qid;
+ __le16 tso_mss_cmp_min_err_vf_id;
+ __le16 tso_mss_cmp_max_err_qid;
+ __le16 tso_mss_cmp_max_err_vf_id;
+ __le16 tx_rd_fbd_poison_qid;
+ __le16 tx_rd_fbd_poison_vf_id;
+ __le16 rx_rd_fbd_poison_qid;
+ __le16 rx_rd_fbd_poison_vf_id;
+ u8 rsv[4];
+};
+
+#define HCLGE_LINK_EVENT_REPORT_EN_B 0
+#define HCLGE_NCSI_ERROR_REPORT_EN_B 1
+struct hclge_firmware_compat_cmd {
+ __le32 compat;
+ u8 rsv[20];
+};
+
+#define HCLGE_SFP_INFO_CMD_NUM 6
+#define HCLGE_SFP_INFO_BD0_LEN 20
+#define HCLGE_SFP_INFO_BDX_LEN 24
+#define HCLGE_SFP_INFO_MAX_LEN \
+ (HCLGE_SFP_INFO_BD0_LEN + \
+ (HCLGE_SFP_INFO_CMD_NUM - 1) * HCLGE_SFP_INFO_BDX_LEN)
+
+struct hclge_sfp_info_bd0_cmd {
+ __le16 offset;
+ __le16 read_len;
+ u8 data[HCLGE_SFP_INFO_BD0_LEN];
+};
+
+#define HCLGE_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclge_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1;
+ __le32 max_tm_rate;
+};
+
+int hclge_cmd_init(struct hclge_dev *hdev);
+static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+#define hclge_write_dev(a, reg, value) \
+ hclge_write_reg((a)->io_base, (reg), (value))
+#define hclge_read_dev(a, reg) \
+ hclge_read_reg((a)->io_base, (reg))
+
+static inline u32 hclge_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define HCLGE_SEND_SYNC(flag) \
+ ((flag) & HCLGE_CMD_FLAG_NO_INTR)
+
+struct hclge_hw;
+int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num);
+void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
+ enum hclge_opcode_type opcode, bool is_read);
+void hclge_cmd_reuse_desc(struct hclge_desc *desc, bool is_read);
+
+enum hclge_cmd_status hclge_cmd_mdio_write(struct hclge_hw *hw,
+ struct hclge_desc *desc);
+enum hclge_cmd_status hclge_cmd_mdio_read(struct hclge_hw *hw,
+ struct hclge_desc *desc);
+
+void hclge_cmd_uninit(struct hclge_dev *hdev);
+int hclge_cmd_queue_init(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
new file mode 100644
index 000000000..d60b8dfe3
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_dcb.h"
+#include "hclge_tm.h"
+#include "hclge_dcb.h"
+#include "hnae3.h"
+
+#define BW_PERCENT 100
+
+static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev,
+ struct ieee_ets *ets)
+{
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ hdev->tm_info.tc_info[i].tc_sch_mode =
+ HCLGE_SCH_MODE_SP;
+ hdev->tm_info.pg_info[0].tc_dwrr[i] = 0;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ hdev->tm_info.tc_info[i].tc_sch_mode =
+ HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.pg_info[0].tc_dwrr[i] =
+ ets->tc_tx_bw[i];
+ break;
+ default:
+ /* The hardware only supports SP (strict priority)
+ * and ETS (enhanced transmission selection)
+ * algorithms; if we receive any other value
+ * from dcbnl, return an error.
+ */
+ return -EINVAL;
+ }
+ }
+
+ hclge_tm_prio_tc_info_update(hdev, ets->prio_tc);
+
+ return 0;
+}
+
+static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev,
+ struct ieee_ets *ets)
+{
+ u32 i;
+
+ memset(ets, 0, sizeof(*ets));
+ ets->willing = 1;
+ ets->ets_cap = hdev->tc_max;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ets->prio_tc[i] = hdev->tm_info.prio_tc[i];
+ if (i < hdev->tm_info.num_tc)
+ ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i];
+ else
+ ets->tc_tx_bw[i] = 0;
+
+ if (hdev->tm_info.tc_info[i].tc_sch_mode ==
+ HCLGE_SCH_MODE_SP)
+ ets->tc_tsa[i] = IEEE_8021QAZ_TSA_STRICT;
+ else
+ ets->tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
+ }
+}
+
+/* IEEE std */
+static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_tm_info_to_ieee_ets(hdev, ets);
+
+ return 0;
+}
+
+static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc,
+ u8 *prio_tc)
+{
+ int i;
+
+ if (num_tc > hdev->tc_max) {
+ dev_err(&hdev->pdev->dev,
+ "tc num checking failed, %u > tc_max(%u)\n",
+ num_tc, hdev->tc_max);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (prio_tc[i] >= num_tc) {
+ dev_err(&hdev->pdev->dev,
+ "prio_tc[%d] checking failed, %u >= num_tc(%u)\n",
+ i, prio_tc[i], num_tc);
+ return -EINVAL;
+ }
+ }
+
+ if (num_tc > hdev->vport[0].alloc_tqps) {
+ dev_err(&hdev->pdev->dev,
+ "allocated tqp checking failed, %u > tqp(%u)\n",
+ num_tc, hdev->vport[0].alloc_tqps);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
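+/* Work out how many TCs the new ETS configuration needs and note whether
+ * the prio-to-TC mapping differs from the current one.
+ */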
+static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets,
+ bool *changed)
+{
+ u8 max_tc_id = 0;
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i])
+ *changed = true;
+
+ if (ets->prio_tc[i] > max_tc_id)
+ max_tc_id = ets->prio_tc[i];
+ }
+
+ /* Return the max tc number; the max tc id needs to be increased by 1 */
+ return max_tc_id + 1;
+}
+
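+/* Validate the per-TC scheduling mode: strict priority TCs are always
+ * accepted, while ETS TCs must be enabled, have a non-zero bandwidth, and
+ * their bandwidths must add up to 100 percent.
+ */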
+static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev,
+ struct ieee_ets *ets, bool *changed,
+ u8 tc_num)
+{
+ bool has_ets_tc = false;
+ u32 total_ets_bw = 0;
+ u8 i;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ switch (ets->tc_tsa[i]) {
+ case IEEE_8021QAZ_TSA_STRICT:
+ if (hdev->tm_info.tc_info[i].tc_sch_mode !=
+ HCLGE_SCH_MODE_SP)
+ *changed = true;
+ break;
+ case IEEE_8021QAZ_TSA_ETS:
+ if (i >= tc_num) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u is disabled, cannot set ets bw\n",
+ i);
+ return -EINVAL;
+ }
+
+ /* The hardware switches to SP mode if the bandwidth is
+ * 0, so the ETS bandwidth must be greater than 0.
+ */
+ if (!ets->tc_tx_bw[i]) {
+ dev_err(&hdev->pdev->dev,
+ "tc%u ets bw cannot be 0\n", i);
+ return -EINVAL;
+ }
+
+ if (hdev->tm_info.tc_info[i].tc_sch_mode !=
+ HCLGE_SCH_MODE_DWRR)
+ *changed = true;
+
+ total_ets_bw += ets->tc_tx_bw[i];
+ has_ets_tc = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ if (has_ets_tc && total_ets_bw != BW_PERCENT)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets,
+ u8 *tc, bool *changed)
+{
+ u8 tc_num;
+ int ret;
+
+ tc_num = hclge_ets_tc_changed(hdev, ets, changed);
+
+ ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc);
+ if (ret)
+ return ret;
+
+ ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num);
+ if (ret)
+ return ret;
+
+ *tc = tc_num;
+ if (*tc != hdev->tm_info.num_tc)
+ *changed = true;
+
+ return 0;
+}
+
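+/* Re-apply the scheduler, pause, buffer and RSS configuration after the TC
+ * mapping has changed.
+ */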
+static int hclge_map_update(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_schd_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ return ret;
+
+ hclge_rss_indir_init_cfg(hdev);
+
+ return hclge_rss_init_hw(hdev);
+}
+
+static int hclge_client_setup_tc(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_client *client;
+ struct hnae3_handle *handle;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ handle = &vport[i].nic;
+ client = handle->client;
+
+ if (!client || !client->ops || !client->ops->setup_tc)
+ continue;
+
+ ret = client->ops->setup_tc(handle, hdev->tm_info.num_tc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_notify_down_uinit(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+}
+
+static int hclge_notify_init_up(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ bool map_changed = false;
+ u8 num_tc = 0;
+ int ret;
+
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) ||
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ return -EINVAL;
+
+ ret = hclge_ets_validate(hdev, ets, &num_tc, &map_changed);
+ if (ret)
+ return ret;
+
+ if (map_changed) {
+ netif_dbg(h, drv, netdev, "set ets\n");
+
+ ret = hclge_notify_down_uinit(hdev);
+ if (ret)
+ return ret;
+ }
+
+ hclge_tm_schd_info_update(hdev, num_tc);
+ if (num_tc > 1)
+ hdev->flag |= HCLGE_FLAG_DCB_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+ ret = hclge_ieee_ets_to_tm_info(hdev, ets);
+ if (ret)
+ goto err_out;
+
+ if (map_changed) {
+ ret = hclge_map_update(hdev);
+ if (ret)
+ goto err_out;
+
+ ret = hclge_client_setup_tc(hdev);
+ if (ret)
+ goto err_out;
+
+ ret = hclge_notify_init_up(hdev);
+ if (ret)
+ return ret;
+ }
+
+ return hclge_tm_dwrr_cfg(hdev);
+
+err_out:
+ if (!map_changed)
+ return ret;
+
+ hclge_notify_init_up(hdev);
+
+ return ret;
+}
+
+static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+{
+ u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC];
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u8 i;
+
+ memset(pfc, 0, sizeof(*pfc));
+ pfc->pfc_cap = hdev->pfc_max;
+ pfc->pfc_en = hdev->tm_info.pfc_en;
+
+ ret = hclge_pfc_tx_stats_get(hdev, requests);
+ if (ret)
+ return ret;
+
+ ret = hclge_pfc_rx_stats_get(hdev, indications);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ pfc->requests[i] = requests[i];
+ pfc->indications[i] = indications[i];
+ }
+ return 0;
+}
+
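+/* Convert the per-priority PFC enable bitmap from dcbnl into a per-TC
+ * bitmap and re-apply the pause and buffer configuration.
+ */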
+static int hclge_ieee_setpfc(struct hnae3_handle *h, struct ieee_pfc *pfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+ u8 i, j, pfc_map, *prio_tc;
+ int ret;
+
+ if (!(hdev->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
+ return -EINVAL;
+
+ if (pfc->pfc_en == hdev->tm_info.pfc_en)
+ return 0;
+
+ prio_tc = hdev->tm_info.prio_tc;
+ pfc_map = 0;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) {
+ if ((prio_tc[j] == i) && (pfc->pfc_en & BIT(j))) {
+ pfc_map |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ hdev->tm_info.hw_pfc_map = pfc_map;
+ hdev->tm_info.pfc_en = pfc->pfc_en;
+
+ netif_dbg(h, drv, netdev,
+ "set pfc: pfc_en=%x, pfc_map=%x, num_tc=%u\n",
+ pfc->pfc_en, pfc_map, hdev->tm_info.num_tc);
+
+ hclge_tm_pfc_info_update(hdev);
+
+ ret = hclge_pause_setup_hw(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret) {
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ return ret;
+ }
+
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+/* DCBX configuration */
+static u8 hclge_getdcbx(struct hnae3_handle *h)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE)
+ return 0;
+
+ return hdev->dcbx_cap;
+}
+
+static u8 hclge_setdcbx(struct hnae3_handle *h, u8 mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct net_device *netdev = h->kinfo.netdev;
+ struct hclge_dev *hdev = vport->back;
+
+ netif_dbg(h, drv, netdev, "set dcbx: mode=%u\n", mode);
+
+ /* No support for LLD_MANAGED modes or CEE */
+ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
+ (mode & DCB_CAP_DCBX_VER_CEE) ||
+ !(mode & DCB_CAP_DCBX_HOST))
+ return 1;
+
+ hdev->dcbx_cap = mode;
+
+ return 0;
+}
+
+/* Set up TC for hardware offloaded mqprio in channel mode */
+static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc)
+{
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (hdev->flag & HCLGE_FLAG_DCB_ENABLE)
+ return -EINVAL;
+
+ ret = hclge_dcb_common_validate(hdev, tc, prio_tc);
+ if (ret)
+ return -EINVAL;
+
+ ret = hclge_notify_down_uinit(hdev);
+ if (ret)
+ return ret;
+
+ hclge_tm_schd_info_update(hdev, tc);
+ hclge_tm_prio_tc_info_update(hdev, prio_tc);
+
+ ret = hclge_tm_init_hw(hdev, false);
+ if (ret)
+ goto err_out;
+
+ ret = hclge_client_setup_tc(hdev);
+ if (ret)
+ goto err_out;
+
+ hdev->flag &= ~HCLGE_FLAG_DCB_ENABLE;
+
+ if (tc > 1)
+ hdev->flag |= HCLGE_FLAG_MQPRIO_ENABLE;
+ else
+ hdev->flag &= ~HCLGE_FLAG_MQPRIO_ENABLE;
+
+ return hclge_notify_init_up(hdev);
+
+err_out:
+ hclge_notify_init_up(hdev);
+
+ return ret;
+}
+
+static const struct hnae3_dcb_ops hns3_dcb_ops = {
+ .ieee_getets = hclge_ieee_getets,
+ .ieee_setets = hclge_ieee_setets,
+ .ieee_getpfc = hclge_ieee_getpfc,
+ .ieee_setpfc = hclge_ieee_setpfc,
+ .getdcbx = hclge_getdcbx,
+ .setdcbx = hclge_setdcbx,
+ .setup_tc = hclge_setup_tc,
+};
+
+void hclge_dcb_ops_set(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo;
+
+ /* If hdev does not support DCB or the vport is
+ * not a PF, dcb_ops is not set.
+ */
+ if (!hnae3_dev_dcb_supported(hdev) ||
+ vport->vport_id != 0)
+ return;
+
+ kinfo = &vport->nic.kinfo;
+ kinfo->dcb_ops = &hns3_dcb_ops;
+ hdev->dcbx_cap = DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_HOST;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
new file mode 100644
index 000000000..b04702e65
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_DCB_H__
+#define __HCLGE_DCB_H__
+
+#include "hclge_main.h"
+
+#ifdef CONFIG_HNS3_DCB
+void hclge_dcb_ops_set(struct hclge_dev *hdev);
+#else
+static inline void hclge_dcb_ops_set(struct hclge_dev *hdev) {}
+#endif
+
+#endif /* __HCLGE_DCB_H__ */
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
new file mode 100644
index 000000000..16df050e7
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c
@@ -0,0 +1,1557 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#include <linux/device.h>
+
+#include "hclge_debugfs.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+#include "hnae3.h"
+
+static const struct hclge_dbg_reg_type_info hclge_dbg_reg_info[] = {
+ { .reg_type = "bios common",
+ .dfx_msg = &hclge_dbg_bios_common_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_bios_common_reg),
+ .offset = HCLGE_DBG_DFX_BIOS_OFFSET,
+ .cmd = HCLGE_OPC_DFX_BIOS_COMMON_REG } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_0),
+ .offset = HCLGE_DBG_DFX_SSU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_0 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_1),
+ .offset = HCLGE_DBG_DFX_SSU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_1 } },
+ { .reg_type = "ssu",
+ .dfx_msg = &hclge_dbg_ssu_reg_2[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ssu_reg_2),
+ .offset = HCLGE_DBG_DFX_SSU_2_OFFSET,
+ .cmd = HCLGE_OPC_DFX_SSU_REG_2 } },
+ { .reg_type = "igu egu",
+ .dfx_msg = &hclge_dbg_igu_egu_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_igu_egu_reg),
+ .offset = HCLGE_DBG_DFX_IGU_OFFSET,
+ .cmd = HCLGE_OPC_DFX_IGU_EGU_REG } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_0[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_0),
+ .offset = HCLGE_DBG_DFX_RPU_0_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_0 } },
+ { .reg_type = "rpu",
+ .dfx_msg = &hclge_dbg_rpu_reg_1[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rpu_reg_1),
+ .offset = HCLGE_DBG_DFX_RPU_1_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RPU_REG_1 } },
+ { .reg_type = "ncsi",
+ .dfx_msg = &hclge_dbg_ncsi_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ncsi_reg),
+ .offset = HCLGE_DBG_DFX_NCSI_OFFSET,
+ .cmd = HCLGE_OPC_DFX_NCSI_REG } },
+ { .reg_type = "rtc",
+ .dfx_msg = &hclge_dbg_rtc_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rtc_reg),
+ .offset = HCLGE_DBG_DFX_RTC_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RTC_REG } },
+ { .reg_type = "ppp",
+ .dfx_msg = &hclge_dbg_ppp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_ppp_reg),
+ .offset = HCLGE_DBG_DFX_PPP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_PPP_REG } },
+ { .reg_type = "rcb",
+ .dfx_msg = &hclge_dbg_rcb_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_rcb_reg),
+ .offset = HCLGE_DBG_DFX_RCB_OFFSET,
+ .cmd = HCLGE_OPC_DFX_RCB_REG } },
+ { .reg_type = "tqp",
+ .dfx_msg = &hclge_dbg_tqp_reg[0],
+ .reg_msg = { .msg_num = ARRAY_SIZE(hclge_dbg_tqp_reg),
+ .offset = HCLGE_DBG_DFX_TQP_OFFSET,
+ .cmd = HCLGE_OPC_DFX_TQP_REG } },
+};
+
+static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset)
+{
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int entries_per_desc;
+ int index;
+ int ret;
+
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "get dfx bdnum fail, ret = %d\n", ret);
+ return ret;
+ }
+
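+ /* The BD counts for all DFX register blocks are packed into the
+ * returned descriptors' data words; 'offset' picks this block's entry.
+ */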
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ index = offset % entries_per_desc;
+ return le32_to_cpu(desc[offset / entries_per_desc].data[index]);
+}
+
+static int hclge_dbg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src,
+ int index, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int ret, i;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ desc->data[0] = cpu_to_le32(index);
+
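+ /* Every descriptor except the last carries the NEXT flag so the
+ * firmware treats the whole array as one multi-BD command.
+ */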
+ for (i = 1; i < bd_num; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "cmd(0x%x) send fail, ret = %d\n", cmd, ret);
+ return ret;
+}
+
+static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev,
+ const struct hclge_dbg_reg_type_info *reg_info,
+ const char *cmd_buf)
+{
+#define IDX_OFFSET 1
+
+ const char *s = &cmd_buf[strlen(reg_info->reg_type) + IDX_OFFSET];
+ const struct hclge_dbg_dfx_message *dfx_message = reg_info->dfx_msg;
+ const struct hclge_dbg_reg_common_msg *reg_msg = &reg_info->reg_msg;
+ struct hclge_desc *desc_src;
+ struct hclge_desc *desc;
+ int entries_per_desc;
+ int bd_num, buf_len;
+ int index = 0;
+ int min_num;
+ int ret, i;
+
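+ /* An optional numeric index may follow the register type in the
+ * command string; fall back to 0 if it is absent or malformed.
+ */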
+ if (*s) {
+ ret = kstrtouint(s, 0, &index);
+ index = (ret != 0) ? 0 : index;
+ }
+
+ bd_num = hclge_dbg_get_dfx_bd_num(hdev, reg_msg->offset);
+ if (bd_num <= 0) {
+ dev_err(&hdev->pdev->dev, "get cmd(%d) bd num(%d) failed\n",
+ reg_msg->offset, bd_num);
+ return;
+ }
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src)
+ return;
+
+ desc = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, reg_msg->cmd);
+ if (ret) {
+ kfree(desc_src);
+ return;
+ }
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ min_num = min_t(int, bd_num * entries_per_desc, reg_msg->msg_num);
+
+ desc = desc_src;
+ for (i = 0; i < min_num; i++) {
+ if (i > 0 && (i % entries_per_desc) == 0)
+ desc++;
+ if (dfx_message->flag)
+ dev_info(&hdev->pdev->dev, "%s: 0x%x\n",
+ dfx_message->message,
+ le32_to_cpu(desc->data[i % entries_per_desc]));
+
+ dfx_message++;
+ }
+
+ kfree(desc_src);
+}
+
+static void hclge_dbg_dump_mac_enable_status(struct hclge_dev *hdev)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac enable status, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_mode_cmd *)desc.data;
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+
+ dev_info(&hdev->pdev->dev, "config_mac_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_EN_B));
+ dev_info(&hdev->pdev->dev, "config_pad_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_TX_B));
+ dev_info(&hdev->pdev->dev, "config_pad_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_PAD_RX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_trans_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_TX_B));
+ dev_info(&hdev->pdev->dev, "config_1588_rcv_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_1588_RX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_app_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_APP_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_line_loop_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_LINE_LP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_fcs_tx_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_FCS_TX_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_strip_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B));
+ dev_info(&hdev->pdev->dev, "config_mac_rx_fcs_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_RX_FCS_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_under_min_err_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B));
+ dev_info(&hdev->pdev->dev, "config_mac_tx_oversize_truncate_en: %#x\n",
+ hnae3_get_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B));
+}
+
+static void hclge_dbg_dump_mac_frame_size(struct hclge_dev *hdev)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac frame size, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "max_frame_size: %u\n",
+ le16_to_cpu(req->max_frm_size));
+ dev_info(&hdev->pdev->dev, "min_frame_size: %u\n", req->min_frm_size);
+}
+
+static void hclge_dbg_dump_mac_speed_duplex(struct hclge_dev *hdev)
+{
+#define HCLGE_MAC_SPEED_SHIFT 0
+#define HCLGE_MAC_SPEED_MASK GENMASK(5, 0)
+#define HCLGE_MAC_DUPLEX_SHIFT 7
+
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump mac speed duplex, ret = %d\n", ret);
+ return;
+ }
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "speed: %#lx\n",
+ hnae3_get_field(req->speed_dup, HCLGE_MAC_SPEED_MASK,
+ HCLGE_MAC_SPEED_SHIFT));
+ dev_info(&hdev->pdev->dev, "duplex: %#x\n",
+ hnae3_get_bit(req->speed_dup, HCLGE_MAC_DUPLEX_SHIFT));
+}
+
+static void hclge_dbg_dump_mac(struct hclge_dev *hdev)
+{
+ hclge_dbg_dump_mac_enable_status(hdev);
+
+ hclge_dbg_dump_mac_frame_size(hdev);
+
+ hclge_dbg_dump_mac_speed_duplex(hdev);
+}
+
+static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, const char *cmd_buf)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_dbg_bitmap_cmd *bitmap;
+ enum hclge_opcode_type cmd;
+ int rq_id, pri_id, qset_id;
+ int port_id, nq_id, pg_id;
+ struct hclge_desc desc[2];
+ int cnt, ret;
+
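+ /* The command string supplies six ids: port, priority, pg, rq, nq
+ * and qset.
+ */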
+ cnt = sscanf(cmd_buf, "%i %i %i %i %i %i",
+ &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id);
+ if (cnt != 6) {
+ dev_err(&hdev->pdev->dev,
+ "dump dcb: bad command parameter, cnt=%d\n", cnt);
+ return;
+ }
+
+ cmd = HCLGE_OPC_QSET_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2);
+ dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3);
+
+ cmd = HCLGE_OPC_PRI_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2);
+
+ cmd = HCLGE_OPC_PG_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1);
+ dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2);
+
+ cmd = HCLGE_OPC_PORT_DFX_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1];
+ dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0);
+ dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1);
+
+ cmd = HCLGE_OPC_SCH_NQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ dev_info(dev, "sch_nq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+
+ cmd = HCLGE_OPC_SCH_RQ_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ dev_info(dev, "sch_rq_cnt: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+
+ cmd = HCLGE_OPC_TM_INTERNAL_STS;
+ ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ dev_info(dev, "pri_bp: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "fifo_dfx_info: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n",
+ le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "tx_private_waterline: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "tm_bypass_en: 0x%x\n", le32_to_cpu(desc[0].data[5]));
+ dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", le32_to_cpu(desc[1].data[0]));
+ dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", le32_to_cpu(desc[1].data[1]));
+
+ cmd = HCLGE_OPC_TM_INTERNAL_CNT;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ dev_info(dev, "SCH_NIC_NUM: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+
+ cmd = HCLGE_OPC_TM_INTERNAL_STS_1;
+ ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, cmd);
+ if (ret)
+ goto err_dcb_cmd_send;
+
+ dev_info(dev, "TC_MAP_SEL: 0x%x\n", le32_to_cpu(desc[0].data[1]));
+ dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[2]));
+ dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", le32_to_cpu(desc[0].data[3]));
+ dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[4]));
+ dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n",
+ le32_to_cpu(desc[0].data[5]));
+ return;
+
+err_dcb_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "failed to dump dcb dfx, cmd = %#x, ret = %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, const char *cmd_buf)
+{
+ const struct hclge_dbg_reg_type_info *reg_info;
+ bool has_dump = false;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_dbg_reg_info); i++) {
+ reg_info = &hclge_dbg_reg_info[i];
+ if (!strncmp(cmd_buf, reg_info->reg_type,
+ strlen(reg_info->reg_type))) {
+ hclge_dbg_dump_reg_common(hdev, reg_info, cmd_buf);
+ has_dump = true;
+ }
+ }
+
+ if (strncmp(cmd_buf, "mac", strlen("mac")) == 0) {
+ hclge_dbg_dump_mac(hdev);
+ has_dump = true;
+ }
+
+ if (strncmp(cmd_buf, "dcb", 3) == 0) {
+ hclge_dbg_dump_dcb(hdev, &cmd_buf[sizeof("dcb")]);
+ has_dump = true;
+ }
+
+ if (!has_dump) {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return;
+ }
+}
+
+static void hclge_print_tc_info(struct hclge_dev *hdev, bool flag, int index)
+{
+ if (flag)
+ dev_info(&hdev->pdev->dev, "tc(%d): no sp mode weight: %u\n",
+ index, hdev->tm_info.pg_info[0].tc_dwrr[index]);
+ else
+ dev_info(&hdev->pdev->dev, "tc(%d): sp mode\n", index);
+}
+
+static void hclge_dbg_dump_tc(struct hclge_dev *hdev)
+{
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ int i, ret;
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tc\n");
+ return;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump tc fail, ret = %d\n", ret);
+ return;
+ }
+
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "dump tc: %u tc enabled\n",
+ hdev->tm_info.num_tc);
+ dev_info(&hdev->pdev->dev, "weight_offset: %u\n",
+ ets_weight->weight_offset);
+
+ for (i = 0; i < HNAE3_MAX_TC; i++)
+ hclge_print_tc_info(hdev, ets_weight->tc_weight[i], i);
+}
+
+static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *port_shap_cfg_cmd;
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n",
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
+
+ cmd = HCLGE_OPC_TM_PG_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n",
+ le32_to_cpu(pg_shap_cfg_cmd->pg_shapping_para));
+
+ cmd = HCLGE_OPC_TM_PORT_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n",
+ le32_to_cpu(port_shap_cfg_cmd->port_shapping_para));
+
+ cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n",
+ le32_to_cpu(desc.data[0]));
+
+ cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "PRI_SCH pri_id: %u\n",
+ le32_to_cpu(desc.data[0]));
+
+ cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "QS_SCH qs_id: %u\n",
+ le32_to_cpu(desc.data[0]));
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_pg_cmd_send;
+
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET tc_id: %u\n",
+ bp_to_qs_map_cmd->tc_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_group_id: 0x%x\n",
+ bp_to_qs_map_cmd->qs_group_id);
+ dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n",
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map));
+ return;
+
+err_tm_pg_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), ret = %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_tm(struct hclge_dev *hdev)
+{
+ struct hclge_priority_weight_cmd *priority_weight;
+ struct hclge_pg_to_pri_link_cmd *pg_to_pri_map;
+ struct hclge_qs_to_pri_link_cmd *qs_to_pri_map;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ struct hclge_pg_weight_cmd *pg_weight;
+ struct hclge_qs_weight_cmd *qs_weight;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int ret;
+
+ cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump tm\n");
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n",
+ pg_to_pri_map->pg_id);
+ dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n",
+ pg_to_pri_map->pri_bit_map);
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n",
+ le16_to_cpu(qs_to_pri_map->qs_id));
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n",
+ qs_to_pri_map->priority);
+ dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n",
+ qs_to_pri_map->link_vld);
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n",
+ le16_to_cpu(nq_to_qs_map->nq_id));
+ dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: 0x%x\n",
+ le16_to_cpu(nq_to_qs_map->qset_id));
+
+ cmd = HCLGE_OPC_TM_PG_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ pg_weight = (struct hclge_pg_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id);
+ dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_QS_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ qs_weight = (struct hclge_qs_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "QS qs_id: %u\n",
+ le16_to_cpu(qs_weight->qs_id));
+ dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_WEIGHT;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ priority_weight = (struct hclge_priority_weight_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr);
+
+ cmd = HCLGE_OPC_TM_PRI_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n",
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
+
+ cmd = HCLGE_OPC_TM_PRI_P_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_cmd_send;
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id);
+ dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n",
+ le32_to_cpu(shap_cfg_cmd->pri_shapping_para));
+
+ hclge_dbg_dump_tm_pg(hdev);
+
+ return;
+
+err_tm_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), ret = %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_nq_to_qs_link_cmd *nq_to_qs_map;
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_tqp_tx_queue_tc_cmd *tc;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc;
+ int queue_id, group_id;
+ u32 qset_mapping[32];
+ int tc_id, qset_id;
+ int pri_id, ret;
+ u32 i;
+
+ ret = kstrtouint(cmd_buf, 0, &queue_id);
+ queue_id = (ret != 0) ? 0 : queue_id;
+
+ cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK;
+ nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ nq_to_qs_map->nq_id = cpu_to_le16(queue_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
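+ /* Only the low bits of the returned ids are meaningful; the masks
+ * below assume a 10-bit qset id and a 3-bit tc id.
+ */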
+ qset_id = le16_to_cpu(nq_to_qs_map->qset_id) & 0x3FF;
+
+ cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK;
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ map->qs_id = cpu_to_le16(qset_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ pri_id = map->priority;
+
+ cmd = HCLGE_OPC_TQP_TX_QUEUE_TC;
+ tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ tc->queue_id = cpu_to_le16(queue_id);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+ tc_id = tc->tc_id & 0x7;
+
+ dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n");
+ dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n",
+ queue_id, qset_id, pri_id, tc_id);
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports tm mapping\n");
+ return;
+ }
+
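+ /* Read the BP-to-qset bitmap for this tc: 32 groups of 32 bits
+ * cover all 1024 qsets.
+ */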
+ cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING;
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+ for (group_id = 0; group_id < 32; group_id++) {
+ hclge_cmd_setup_basic_desc(&desc, cmd, true);
+ bp_to_qs_map_cmd->tc_id = tc_id;
+ bp_to_qs_map_cmd->qs_group_id = group_id;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ goto err_tm_map_cmd_send;
+
+ qset_mapping[group_id] =
+ le32_to_cpu(bp_to_qs_map_cmd->qs_bit_map);
+ }
+
+ dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n");
+
+ i = 0;
+ for (group_id = 0; group_id < 4; group_id++) {
+ dev_info(&hdev->pdev->dev,
+ "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
+ group_id * 256, qset_mapping[(u32)(i + 7)],
+ qset_mapping[(u32)(i + 6)], qset_mapping[(u32)(i + 5)],
+ qset_mapping[(u32)(i + 4)], qset_mapping[(u32)(i + 3)],
+ qset_mapping[(u32)(i + 2)], qset_mapping[(u32)(i + 1)],
+ qset_mapping[i]);
+ i += 8;
+ }
+
+ return;
+
+err_tm_map_cmd_send:
+ dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), ret = %d\n",
+ cmd, ret);
+}
+
+static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "dump checksum fail, ret = %d\n",
+ ret);
+ return;
+ }
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pause cfg\n");
+ dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n",
+ pause_param->pause_trans_gap);
+ dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n",
+ le16_to_cpu(pause_param->pause_trans_time));
+}
+
+static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
+{
+ struct hclge_qos_pri_map_cmd *pri_map;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "dump qos pri map fail, ret = %d\n", ret);
+ return;
+ }
+
+ pri_map = (struct hclge_qos_pri_map_cmd *)desc.data;
+ dev_info(&hdev->pdev->dev, "dump qos pri map\n");
+ dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri);
+ dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc);
+ dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc);
+ dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc);
+ dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc);
+ dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc);
+ dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc);
+ dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc);
+ dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
+ struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+ struct hclge_rx_priv_wl_buf *rx_priv_wl;
+ struct hclge_rx_com_wl *rx_packet_cnt;
+ struct hclge_rx_com_thrd *rx_com_thrd;
+ struct hclge_rx_com_wl *rx_com_wl;
+ enum hclge_opcode_type cmd;
+ struct hclge_desc desc[2];
+ int i, ret;
+
+ cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
+
+ tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
+
+ cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
+ le16_to_cpu(rx_buf_cmd->buf_num[i]));
+
+ dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
+ le16_to_cpu(rx_buf_cmd->shared_buf));
+
+ cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev, "\n");
+ dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_com_wl->com_wl.high),
+ le16_to_cpu(rx_com_wl->com_wl.low));
+
+ cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+ dev_info(&hdev->pdev->dev,
+ "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
+ le16_to_cpu(rx_packet_cnt->com_wl.high),
+ le16_to_cpu(rx_packet_cnt->com_wl.low));
+ dev_info(&hdev->pdev->dev, "\n");
+
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ dev_info(&hdev->pdev->dev,
+ "Only DCB-supported dev supports rx priv wl\n");
+ return;
+ }
+ cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+
+ rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_priv_wl->tc_wl[i].high),
+ le16_to_cpu(rx_priv_wl->tc_wl[i].low));
+
+ cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ goto err_qos_cmd_send;
+
+ dev_info(&hdev->pdev->dev, "\n");
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+ rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data;
+ for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
+ dev_info(&hdev->pdev->dev,
+ "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n",
+ i + HCLGE_TC_NUM_ONE_DESC,
+ le16_to_cpu(rx_com_thrd->com_thrd[i].high),
+ le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+ return;
+
+err_qos_cmd_send:
+ dev_err(&hdev->pdev->dev,
+ "dump qos buf cfg fail(0x%x), ret = %d\n", cmd, ret);
+}
+
+static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev)
+{
+ struct hclge_mac_ethertype_idx_rd_cmd *req0;
+ char printf_buf[HCLGE_DBG_BUF_LEN];
+ struct hclge_desc desc;
+ u32 msg_egress_port;
+ int ret, i;
+
+ dev_info(&hdev->pdev->dev, "mng tab:\n");
+ memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
+ strncat(printf_buf,
+ "entry|mac_addr |mask|ether|mask|vlan|mask",
+ HCLGE_DBG_BUF_LEN - 1);
+ strncat(printf_buf + strlen(printf_buf),
+ "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n",
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1);
+
+ dev_info(&hdev->pdev->dev, "%s", printf_buf);
+
+ for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD,
+ true);
+ req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data;
+ req0->index = cpu_to_le16(i);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "call hclge_cmd_send fail, ret = %d\n", ret);
+ return;
+ }
+
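+ /* A zero resp_code is taken to mean the entry is unused. */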
+ if (!req0->resp_code)
+ continue;
+
+ memset(printf_buf, 0, HCLGE_DBG_BUF_LEN);
+ snprintf(printf_buf, HCLGE_DBG_BUF_LEN,
+ "%02u |%02x:%02x:%02x:%02x:%02x:%02x|",
+ le16_to_cpu(req0->index),
+ req0->mac_addr[0], req0->mac_addr[1],
+ req0->mac_addr[2], req0->mac_addr[3],
+ req0->mac_addr[4], req0->mac_addr[5]);
+
+ snprintf(printf_buf + strlen(printf_buf),
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf),
+ "%x |%04x |%x |%04x|%x |%02x |%02x |",
+ !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B),
+ le16_to_cpu(req0->ethter_type),
+ !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B),
+ le16_to_cpu(req0->vlan_tag) & HCLGE_DBG_MNG_VLAN_TAG,
+ !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B),
+ req0->i_port_bitmap, req0->i_port_direction);
+
+ msg_egress_port = le16_to_cpu(req0->egress_port);
+ snprintf(printf_buf + strlen(printf_buf),
+ HCLGE_DBG_BUF_LEN - strlen(printf_buf),
+ "%x |%x |%02x |%04x|%x\n",
+ !!(msg_egress_port & HCLGE_DBG_MNG_E_TYPE_B),
+ msg_egress_port & HCLGE_DBG_MNG_PF_ID,
+ (msg_egress_port >> 3) & HCLGE_DBG_MNG_VF_ID,
+ le16_to_cpu(req0->egress_queue),
+ !!(msg_egress_port & HCLGE_DBG_MNG_DROP_B));
+
+ dev_info(&hdev->pdev->dev, "%s", printf_buf);
+ }
+}
+
+static int hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage,
+ bool sel_x, u32 loc)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret, i;
+ u32 *req;
+
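+ /* A TCAM key is read as three chained descriptors; the first one
+ * carries the stage, the x/y selector and the entry index.
+ */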
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ req1->index = cpu_to_le32(loc);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ return ret;
+
+ dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n",
+ sel_x ? "x" : "y", loc);
+
+ /* tcam_data0 ~ tcam_data1 */
+ req = (u32 *)req1->tcam_data;
+ for (i = 0; i < 2; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ /* tcam_data2 ~ tcam_data7 */
+ req = (u32 *)req2->tcam_data;
+ for (i = 0; i < 6; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ /* tcam_data8 ~ tcam_data12 */
+ req = (u32 *)req3->tcam_data;
+ for (i = 0; i < 5; i++)
+ dev_info(&hdev->pdev->dev, "%08x\n", *req++);
+
+ return ret;
+}
+
+static int hclge_dbg_get_rules_location(struct hclge_dev *hdev, u16 *rule_locs)
+{
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int cnt = 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (cnt != hdev->hclge_fd_rule_num)
+ return -EINVAL;
+
+ return cnt;
+}
+
+static void hclge_dbg_fd_tcam(struct hclge_dev *hdev)
+{
+ int i, ret, rule_cnt;
+ u16 *rule_locs;
+
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "Only FD-supported dev supports dump fd tcam\n");
+ return;
+ }
+
+ if (!hdev->hclge_fd_rule_num ||
+ !hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return;
+
+ rule_locs = kcalloc(hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ sizeof(u16), GFP_KERNEL);
+ if (!rule_locs)
+ return;
+
+ rule_cnt = hclge_dbg_get_rules_location(hdev, rule_locs);
+ if (rule_cnt <= 0) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get rule number, ret = %d\n", rule_cnt);
+ kfree(rule_locs);
+ return;
+ }
+
+ for (i = 0; i < rule_cnt; i++) {
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, true, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key x, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+
+ ret = hclge_dbg_fd_tcam_read(hdev, 0, false, rule_locs[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get fd tcam key y, ret = %d\n", ret);
+ kfree(rule_locs);
+ return;
+ }
+ }
+
+ kfree(rule_locs);
+}
+
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "PF reset count: %u\n",
+ hdev->rst_stats.pf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "GLOBAL reset count: %u\n",
+ hdev->rst_stats.global_rst_cnt);
+ dev_info(&hdev->pdev->dev, "IMP reset count: %u\n",
+ hdev->rst_stats.imp_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+ hdev->rst_stats.reset_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_reset_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
+ hdev->rst_stats.reset_cnt);
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+ hdev->rst_stats.reset_fail_cnt);
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_REG_BASE));
+ dev_info(&hdev->pdev->dev, "reset interrupt source: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG));
+ dev_info(&hdev->pdev->dev, "reset interrupt status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS));
+ dev_info(&hdev->pdev->dev, "hardware reset status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG));
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
+static void hclge_dbg_dump_serv_info(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "last_serv_processed: %lu\n",
+ hdev->last_serv_processed);
+ dev_info(&hdev->pdev->dev, "last_serv_cnt: %lu\n",
+ hdev->serv_processed_cnt);
+}
+
+static void hclge_dbg_dump_interrupt(struct hclge_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "num_nic_msi: %u\n", hdev->num_nic_msi);
+ dev_info(&hdev->pdev->dev, "num_roce_msi: %u\n", hdev->num_roce_msi);
+ dev_info(&hdev->pdev->dev, "num_msi_used: %u\n", hdev->num_msi_used);
+ dev_info(&hdev->pdev->dev, "num_msi_left: %u\n", hdev->num_msi_left);
+}
+
+static void hclge_dbg_get_m7_stats_info(struct hclge_dev *hdev)
+{
+ struct hclge_desc *desc_src, *desc_tmp;
+ struct hclge_get_m7_bd_cmd *req;
+ struct hclge_desc desc;
+ u32 bd_num, buf_len;
+ int ret, i;
+
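+ /* First query how many descriptors the firmware statistics occupy,
+ * then fetch them and print six data words per descriptor.
+ */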
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_M7_STATS_BD, true);
+
+ req = (struct hclge_get_m7_bd_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics bd number failed, ret = %d\n",
+ ret);
+ return;
+ }
+
+ bd_num = le32_to_cpu(req->bd_num);
+
+ buf_len = sizeof(struct hclge_desc) * bd_num;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src)
+ return;
+
+ desc_tmp = desc_src;
+ ret = hclge_dbg_cmd_send(hdev, desc_tmp, 0, bd_num,
+ HCLGE_OPC_M7_STATS_INFO);
+ if (ret) {
+ kfree(desc_src);
+ dev_err(&hdev->pdev->dev,
+ "get firmware statistics failed, ret = %d\n", ret);
+ return;
+ }
+
+ for (i = 0; i < bd_num; i++) {
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[0]),
+ le32_to_cpu(desc_tmp->data[1]),
+ le32_to_cpu(desc_tmp->data[2]));
+ dev_info(&hdev->pdev->dev, "0x%08x 0x%08x 0x%08x\n",
+ le32_to_cpu(desc_tmp->data[3]),
+ le32_to_cpu(desc_tmp->data[4]),
+ le32_to_cpu(desc_tmp->data[5]));
+
+ desc_tmp++;
+ }
+
+ kfree(desc_src);
+}
+
+#define HCLGE_CMD_NCL_CONFIG_BD_NUM 5
+
+static void hclge_ncl_config_data_print(struct hclge_dev *hdev,
+ struct hclge_desc *desc, int *offset,
+ int *length)
+{
+#define HCLGE_CMD_DATA_NUM 6
+
+ int i;
+ int j;
+
+ for (i = 0; i < HCLGE_CMD_NCL_CONFIG_BD_NUM; i++) {
+ for (j = 0; j < HCLGE_CMD_DATA_NUM; j++) {
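+ /* Skip the first data word of the first BD; it holds the
+ * request's offset/length word rather than NCL_CONFIG data.
+ */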
+ if (i == 0 && j == 0)
+ continue;
+
+ dev_info(&hdev->pdev->dev, "0x%04x | 0x%08x\n",
+ *offset,
+ le32_to_cpu(desc[i].data[j]));
+ *offset += sizeof(u32);
+ *length -= sizeof(u32);
+ if (*length <= 0)
+ return;
+ }
+ }
+}
+
+/* hclge_dbg_dump_ncl_config: print specified range of NCL_CONFIG file
+ * @hdev: pointer to struct hclge_dev
+ * @cmd_buf: string that contains offset and length
+ */
+static void hclge_dbg_dump_ncl_config(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+#define HCLGE_MAX_NCL_CONFIG_OFFSET 4096
+#define HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD (20 + 24 * 4)
+#define HCLGE_NCL_CONFIG_PARAM_NUM 2
+
+ struct hclge_desc desc[HCLGE_CMD_NCL_CONFIG_BD_NUM];
+ int bd_num = HCLGE_CMD_NCL_CONFIG_BD_NUM;
+ int offset;
+ int length;
+ int data0;
+ int ret;
+
+ ret = sscanf(cmd_buf, "%x %x", &offset, &length);
+ if (ret != HCLGE_NCL_CONFIG_PARAM_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Too few parameters, num = %d.\n", ret);
+ return;
+ }
+
+ if (offset < 0 || offset >= HCLGE_MAX_NCL_CONFIG_OFFSET ||
+ length <= 0 || length > HCLGE_MAX_NCL_CONFIG_OFFSET - offset) {
+ dev_err(&hdev->pdev->dev,
+ "Invalid input, offset = %d, length = %d.\n",
+ offset, length);
+ return;
+ }
+
+ dev_info(&hdev->pdev->dev, "offset | data\n");
+
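+ /* data0 packs the offset in its low 16 bits and the chunk length in
+ * its high 16 bits; each command reads at most
+ * HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD bytes.
+ */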
+ while (length > 0) {
+ data0 = offset;
+ if (length >= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD)
+ data0 |= HCLGE_NCL_CONFIG_LENGTH_IN_EACH_CMD << 16;
+ else
+ data0 |= length << 16;
+ ret = hclge_dbg_cmd_send(hdev, desc, data0, bd_num,
+ HCLGE_OPC_QUERY_NCL_CONFIG);
+ if (ret)
+ return;
+
+ hclge_ncl_config_data_print(hdev, desc, &offset, &length);
+ }
+}
+
+static void hclge_dbg_dump_loopback(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ struct hclge_config_mac_mode_cmd *req_app;
+ struct hclge_serdes_lb_cmd *req_serdes;
+ struct hclge_desc desc;
+ u8 loopback_en;
+ int ret;
+
+ req_app = (struct hclge_config_mac_mode_cmd *)desc.data;
+ req_serdes = (struct hclge_serdes_lb_cmd *)desc.data;
+
+ dev_info(&hdev->pdev->dev, "mac id: %u\n", hdev->hw.mac.mac_id);
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump app loopback status, ret = %d\n", ret);
+ return;
+ }
+
+ loopback_en = hnae3_get_bit(le32_to_cpu(req_app->txrx_pad_fcs_loop_en),
+ HCLGE_MAC_APP_LP_B);
+ dev_info(&hdev->pdev->dev, "app loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to dump serdes loopback status, ret = %d\n",
+ ret);
+ return;
+ }
+
+ loopback_en = req_serdes->enable & HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes serial loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ loopback_en = req_serdes->enable &
+ HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ dev_info(&hdev->pdev->dev, "serdes parallel loopback: %s\n",
+ loopback_en ? "on" : "off");
+
+ if (phydev)
+ dev_info(&hdev->pdev->dev, "phy loopback: %s\n",
+ phydev->loopback_enabled ? "on" : "off");
+}
+
+/* hclge_dbg_dump_mac_tnl_status: print message about mac tnl interrupt
+ * @hdev: pointer to struct hclge_dev
+ */
+static void hclge_dbg_dump_mac_tnl_status(struct hclge_dev *hdev)
+{
+#define HCLGE_BILLION_NANO_SECONDS 1000000000
+
+ struct hclge_mac_tnl_stats stats;
+ unsigned long rem_nsec;
+
+ dev_info(&hdev->pdev->dev, "Recently generated mac tnl interruption:\n");
+
+ while (kfifo_get(&hdev->mac_tnl_log, &stats)) {
+ rem_nsec = do_div(stats.time, HCLGE_BILLION_NANO_SECONDS);
+ dev_info(&hdev->pdev->dev, "[%07lu.%03lu] status = 0x%x\n",
+ (unsigned long)stats.time, rem_nsec / 1000,
+ stats.status);
+ }
+}
+
+static void hclge_dbg_dump_qs_shaper_single(struct hclge_dev *hdev, u16 qsid)
+{
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ u8 ir_u, ir_b, ir_s, bs_b, bs_s;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG, true);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(qsid);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "qs%u failed to get tx_rate, ret=%d\n",
+ qsid, ret);
+ return;
+ }
+
+ shapping_para = le32_to_cpu(shap_cfg_cmd->qs_shapping_para);
+ ir_b = hclge_tm_get_field(shapping_para, IR_B);
+ ir_u = hclge_tm_get_field(shapping_para, IR_U);
+ ir_s = hclge_tm_get_field(shapping_para, IR_S);
+ bs_b = hclge_tm_get_field(shapping_para, BS_B);
+ bs_s = hclge_tm_get_field(shapping_para, BS_S);
+
+ dev_info(&hdev->pdev->dev,
+ "qs%u ir_b:%u, ir_u:%u, ir_s:%u, bs_b:%u, bs_s:%u\n",
+ qsid, ir_b, ir_u, ir_s, bs_b, bs_s);
+}
+
+static void hclge_dbg_dump_qs_shaper_all(struct hclge_dev *hdev)
+{
+ struct hnae3_knic_private_info *kinfo;
+ struct hclge_vport *vport;
+ int vport_id, i;
+
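+ /* vport 0 is the PF and vports 1..pci_num_vf() map to the VFs;
+ * dump the qset shaper of every tc on each of them.
+ */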
+ for (vport_id = 0; vport_id <= pci_num_vf(hdev->pdev); vport_id++) {
+ vport = &hdev->vport[vport_id];
+ kinfo = &vport->nic.kinfo;
+
+ dev_info(&hdev->pdev->dev, "qs cfg of vport%d:\n", vport_id);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ u16 qsid = vport->qs_offset + i;
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+ }
+ }
+}
+
+static void hclge_dbg_dump_qs_shaper(struct hclge_dev *hdev,
+ const char *cmd_buf)
+{
+#define HCLGE_MAX_QSET_NUM 1024
+
+ u16 qsid;
+ int ret;
+
+ ret = kstrtou16(cmd_buf, 0, &qsid);
+ if (ret) {
+ hclge_dbg_dump_qs_shaper_all(hdev);
+ return;
+ }
+
+ if (qsid >= HCLGE_MAX_QSET_NUM) {
+ dev_err(&hdev->pdev->dev, "qsid(%u) out of range[0-1023]\n",
+ qsid);
+ return;
+ }
+
+ hclge_dbg_dump_qs_shaper_single(hdev, qsid);
+}
+
+static int hclge_dbg_dump_mac_list(struct hclge_dev *hdev, const char *cmd_buf,
+ bool is_unicast)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_vport *vport;
+ struct list_head *list;
+ u32 func_id;
+ int ret;
+
+ ret = kstrtouint(cmd_buf, 0, &func_id);
+ if (ret < 0) {
+ dev_err(&hdev->pdev->dev,
+ "dump mac list: bad command string, ret = %d\n", ret);
+ return -EINVAL;
+ }
+
+ if (func_id >= hdev->num_alloc_vport) {
+ dev_err(&hdev->pdev->dev,
+ "function id(%u) is out of range(0-%u)\n", func_id,
+ hdev->num_alloc_vport - 1);
+ return -EINVAL;
+ }
+
+ vport = &hdev->vport[func_id];
+
+ list = is_unicast ? &vport->uc_mac_list : &vport->mc_mac_list;
+
+ dev_info(&hdev->pdev->dev, "vport %u %s mac list:\n",
+ func_id, is_unicast ? "uc" : "mc");
+ dev_info(&hdev->pdev->dev, "mac address state\n");
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ dev_info(&hdev->pdev->dev, "%pM %d\n",
+ mac_node->mac_addr, mac_node->state);
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
+}
+
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf)
+{
+#define DUMP_REG "dump reg"
+#define DUMP_TM_MAP "dump tm map"
+#define DUMP_LOOPBACK "dump loopback"
+#define DUMP_INTERRUPT "dump intr"
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
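+ /* Longer command names must be matched before their prefixes,
+ * e.g. "dump tm map" before "dump tm".
+ */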
+ if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) {
+ hclge_dbg_fd_tcam(hdev);
+ } else if (strncmp(cmd_buf, "dump tc", 7) == 0) {
+ hclge_dbg_dump_tc(hdev);
+ } else if (strncmp(cmd_buf, DUMP_TM_MAP, strlen(DUMP_TM_MAP)) == 0) {
+ hclge_dbg_dump_tm_map(hdev, &cmd_buf[sizeof(DUMP_TM_MAP)]);
+ } else if (strncmp(cmd_buf, "dump tm", 7) == 0) {
+ hclge_dbg_dump_tm(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) {
+ hclge_dbg_dump_qos_pause_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) {
+ hclge_dbg_dump_qos_pri_map(hdev);
+ } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) {
+ hclge_dbg_dump_qos_buf_cfg(hdev);
+ } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) {
+ hclge_dbg_dump_mng_table(hdev);
+ } else if (strncmp(cmd_buf, DUMP_REG, strlen(DUMP_REG)) == 0) {
+ hclge_dbg_dump_reg_cmd(hdev, &cmd_buf[sizeof(DUMP_REG)]);
+ } else if (strncmp(cmd_buf, "dump reset info", 15) == 0) {
+ hclge_dbg_dump_rst_info(hdev);
+ } else if (strncmp(cmd_buf, "dump serv info", 14) == 0) {
+ hclge_dbg_dump_serv_info(hdev);
+ } else if (strncmp(cmd_buf, "dump m7 info", 12) == 0) {
+ hclge_dbg_get_m7_stats_info(hdev);
+ } else if (strncmp(cmd_buf, "dump ncl_config", 15) == 0) {
+ hclge_dbg_dump_ncl_config(hdev,
+ &cmd_buf[sizeof("dump ncl_config")]);
+ } else if (strncmp(cmd_buf, "dump mac tnl status", 19) == 0) {
+ hclge_dbg_dump_mac_tnl_status(hdev);
+ } else if (strncmp(cmd_buf, DUMP_LOOPBACK,
+ strlen(DUMP_LOOPBACK)) == 0) {
+ hclge_dbg_dump_loopback(hdev, &cmd_buf[sizeof(DUMP_LOOPBACK)]);
+ } else if (strncmp(cmd_buf, "dump qs shaper", 14) == 0) {
+ hclge_dbg_dump_qs_shaper(hdev,
+ &cmd_buf[sizeof("dump qs shaper")]);
+ } else if (strncmp(cmd_buf, "dump uc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump uc mac list")],
+ true);
+ } else if (strncmp(cmd_buf, "dump mc mac list", 16) == 0) {
+ hclge_dbg_dump_mac_list(hdev,
+ &cmd_buf[sizeof("dump mc mac list")],
+ false);
+ } else if (strncmp(cmd_buf, DUMP_INTERRUPT,
+ strlen(DUMP_INTERRUPT)) == 0) {
+ hclge_dbg_dump_interrupt(hdev);
+ } else {
+ dev_info(&hdev->pdev->dev, "unknown command\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
new file mode 100644
index 000000000..ca2ab6cf8
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h
@@ -0,0 +1,726 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+#ifndef __HCLGE_DEBUGFS_H
+#define __HCLGE_DEBUGFS_H
+
+#include <linux/etherdevice.h>
+#include "hclge_cmd.h"
+
+#define HCLGE_DBG_BUF_LEN 256
+#define HCLGE_DBG_MNG_TBL_MAX 64
+
+#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0)
+#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1)
+#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2)
+#define HCLGE_DBG_MNG_E_TYPE_B BIT(11)
+#define HCLGE_DBG_MNG_DROP_B BIT(13)
+#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF
+#define HCLGE_DBG_MNG_PF_ID 0x0007
+#define HCLGE_DBG_MNG_VF_ID 0x00FF
+
+/* Get DFX BD number offset */
+#define HCLGE_DBG_DFX_BIOS_OFFSET 1
+#define HCLGE_DBG_DFX_SSU_0_OFFSET 2
+#define HCLGE_DBG_DFX_SSU_1_OFFSET 3
+#define HCLGE_DBG_DFX_IGU_OFFSET 4
+#define HCLGE_DBG_DFX_RPU_0_OFFSET 5
+
+#define HCLGE_DBG_DFX_RPU_1_OFFSET 6
+#define HCLGE_DBG_DFX_NCSI_OFFSET 7
+#define HCLGE_DBG_DFX_RTC_OFFSET 8
+#define HCLGE_DBG_DFX_PPP_OFFSET 9
+#define HCLGE_DBG_DFX_RCB_OFFSET 10
+#define HCLGE_DBG_DFX_TQP_OFFSET 11
+
+#define HCLGE_DBG_DFX_SSU_2_OFFSET 12
+
+struct hclge_qos_pri_map_cmd {
+ u8 pri0_tc : 4,
+ pri1_tc : 4;
+ u8 pri2_tc : 4,
+ pri3_tc : 4;
+ u8 pri4_tc : 4,
+ pri5_tc : 4;
+ u8 pri6_tc : 4,
+ pri7_tc : 4;
+ u8 vlan_pri : 4,
+ rev : 4;
+};
+
+struct hclge_dbg_bitmap_cmd {
+ union {
+ u8 bitmap;
+ struct {
+ u8 bit0 : 1,
+ bit1 : 1,
+ bit2 : 1,
+ bit3 : 1,
+ bit4 : 1,
+ bit5 : 1,
+ bit6 : 1,
+ bit7 : 1;
+ };
+ };
+};
+
+struct hclge_dbg_reg_common_msg {
+ int msg_num;
+ int offset;
+ enum hclge_opcode_type cmd;
+};
+
+#define HCLGE_DBG_MAX_DFX_MSG_LEN 60
+struct hclge_dbg_dfx_message {
+ int flag;
+ char message[HCLGE_DBG_MAX_DFX_MSG_LEN];
+};
+
+#define HCLGE_DBG_MAC_REG_TYPE_LEN 32
+struct hclge_dbg_reg_type_info {
+ const char *reg_type;
+ const struct hclge_dbg_dfx_message *dfx_msg;
+ struct hclge_dbg_reg_common_msg reg_msg;
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = {
+ {false, "Reserved"},
+ {true, "BP_CPU_STATE"},
+ {true, "DFX_MSIX_INFO_NIC_0"},
+ {true, "DFX_MSIX_INFO_NIC_1"},
+ {true, "DFX_MSIX_INFO_NIC_2"},
+ {true, "DFX_MSIX_INFO_NIC_3"},
+
+ {true, "DFX_MSIX_INFO_ROC_0"},
+ {true, "DFX_MSIX_INFO_ROC_1"},
+ {true, "DFX_MSIX_INFO_ROC_2"},
+ {true, "DFX_MSIX_INFO_ROC_3"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = {
+ {false, "Reserved"},
+ {true, "SSU_ETS_PORT_STATUS"},
+ {true, "SSU_ETS_TCG_STATUS"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {true, "SSU_BP_STATUS_0"},
+
+ {true, "SSU_BP_STATUS_1"},
+ {true, "SSU_BP_STATUS_2"},
+ {true, "SSU_BP_STATUS_3"},
+ {true, "SSU_BP_STATUS_4"},
+ {true, "SSU_BP_STATUS_5"},
+ {true, "SSU_MAC_TX_PFC_IND"},
+
+ {true, "MAC_SSU_RX_PFC_IND"},
+ {true, "BTMP_AGEING_ST_B0"},
+ {true, "BTMP_AGEING_ST_B1"},
+ {true, "BTMP_AGEING_ST_B2"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "FULL_DROP_NUM"},
+ {true, "PART_DROP_NUM"},
+ {true, "PPP_KEY_DROP_NUM"},
+ {true, "PPP_RLT_DROP_NUM"},
+ {true, "LO_PRI_UNICAST_RLT_DROP_NUM"},
+ {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"},
+
+ {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"},
+ {true, "NCSI_PACKET_CURR_BUFFER_CNT"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK0"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK1"},
+ {true, "BTMP_AGEING_RLS_CNT_BANK2"},
+ {true, "SSU_MB_RD_RLT_DROP_CNT"},
+
+ {true, "SSU_PPP_MAC_KEY_NUM_L"},
+ {true, "SSU_PPP_MAC_KEY_NUM_H"},
+ {true, "SSU_PPP_HOST_KEY_NUM_L"},
+ {true, "SSU_PPP_HOST_KEY_NUM_H"},
+ {true, "PPP_SSU_MAC_RLT_NUM_L"},
+ {true, "PPP_SSU_MAC_RLT_NUM_H"},
+
+ {true, "PPP_SSU_HOST_RLT_NUM_L"},
+ {true, "PPP_SSU_HOST_RLT_NUM_H"},
+ {true, "NCSI_RX_PACKET_IN_CNT_L"},
+ {true, "NCSI_RX_PACKET_IN_CNT_H"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_L"},
+ {true, "NCSI_TX_PACKET_OUT_CNT_H"},
+
+ {true, "SSU_KEY_DROP_NUM"},
+ {true, "MB_UNCOPY_NUM"},
+ {true, "RX_OQ_DROP_PKT_CNT"},
+ {true, "TX_OQ_DROP_PKT_CNT"},
+ {true, "BANK_UNBALANCE_DROP_CNT"},
+ {true, "BANK_UNBALANCE_RX_DROP_CNT"},
+
+ {true, "NIC_L2_ERR_DROP_PKT_CNT"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT"},
+ {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"},
+ {true, "RX_OQ_GLB_DROP_PKT_CNT"},
+ {false, "Reserved"},
+
+ {true, "LO_PRI_UNICAST_CUR_CNT"},
+ {true, "HI_PRI_MULTICAST_CUR_CNT"},
+ {true, "LO_PRI_MULTICAST_CUR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = {
+ {true, "prt_id"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_0"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_1"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_2"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_3"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_4"},
+
+ {true, "PACKET_TC_CURR_BUFFER_CNT_5"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_6"},
+ {true, "PACKET_TC_CURR_BUFFER_CNT_7"},
+ {true, "PACKET_CURR_BUFFER_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "RX_PACKET_IN_CNT_L"},
+ {true, "RX_PACKET_IN_CNT_H"},
+ {true, "RX_PACKET_OUT_CNT_L"},
+ {true, "RX_PACKET_OUT_CNT_H"},
+ {true, "TX_PACKET_IN_CNT_L"},
+ {true, "TX_PACKET_IN_CNT_H"},
+
+ {true, "TX_PACKET_OUT_CNT_L"},
+ {true, "TX_PACKET_OUT_CNT_H"},
+ {true, "ROC_RX_PACKET_IN_CNT_L"},
+ {true, "ROC_RX_PACKET_IN_CNT_H"},
+ {true, "ROC_TX_PACKET_OUT_CNT_L"},
+ {true, "ROC_TX_PACKET_OUT_CNT_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_0_L"},
+ {true, "RX_PACKET_TC_IN_CNT_0_H"},
+ {true, "RX_PACKET_TC_IN_CNT_1_L"},
+ {true, "RX_PACKET_TC_IN_CNT_1_H"},
+ {true, "RX_PACKET_TC_IN_CNT_2_L"},
+ {true, "RX_PACKET_TC_IN_CNT_2_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_3_L"},
+ {true, "RX_PACKET_TC_IN_CNT_3_H"},
+ {true, "RX_PACKET_TC_IN_CNT_4_L"},
+ {true, "RX_PACKET_TC_IN_CNT_4_H"},
+ {true, "RX_PACKET_TC_IN_CNT_5_L"},
+ {true, "RX_PACKET_TC_IN_CNT_5_H"},
+
+ {true, "RX_PACKET_TC_IN_CNT_6_L"},
+ {true, "RX_PACKET_TC_IN_CNT_6_H"},
+ {true, "RX_PACKET_TC_IN_CNT_7_L"},
+ {true, "RX_PACKET_TC_IN_CNT_7_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_0_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_2_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_3_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_5_H"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_6_H"},
+
+ {true, "RX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "RX_PACKET_TC_OUT_CNT_7_H"},
+ {true, "TX_PACKET_TC_IN_CNT_0_L"},
+ {true, "TX_PACKET_TC_IN_CNT_0_H"},
+ {true, "TX_PACKET_TC_IN_CNT_1_L"},
+ {true, "TX_PACKET_TC_IN_CNT_1_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_2_L"},
+ {true, "TX_PACKET_TC_IN_CNT_2_H"},
+ {true, "TX_PACKET_TC_IN_CNT_3_L"},
+ {true, "TX_PACKET_TC_IN_CNT_3_H"},
+ {true, "TX_PACKET_TC_IN_CNT_4_L"},
+ {true, "TX_PACKET_TC_IN_CNT_4_H"},
+
+ {true, "TX_PACKET_TC_IN_CNT_5_L"},
+ {true, "TX_PACKET_TC_IN_CNT_5_H"},
+ {true, "TX_PACKET_TC_IN_CNT_6_L"},
+ {true, "TX_PACKET_TC_IN_CNT_6_H"},
+ {true, "TX_PACKET_TC_IN_CNT_7_L"},
+ {true, "TX_PACKET_TC_IN_CNT_7_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_0_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_0_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_1_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_2_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_3_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_3_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_4_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_5_H"},
+
+ {true, "TX_PACKET_TC_OUT_CNT_6_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_6_H"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_L"},
+ {true, "TX_PACKET_TC_OUT_CNT_7_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = {
+ {true, "OQ_INDEX"},
+ {true, "QUEUE_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = {
+ {true, "prt_id"},
+ {true, "IGU_RX_ERR_PKT"},
+ {true, "IGU_RX_NO_SOF_PKT"},
+ {true, "EGU_TX_1588_SHORT_PKT"},
+ {true, "EGU_TX_1588_PKT"},
+ {true, "EGU_TX_ERR_PKT"},
+
+ {true, "IGU_RX_OUT_L2_PKT"},
+ {true, "IGU_RX_OUT_L3_PKT"},
+ {true, "IGU_RX_OUT_L4_PKT"},
+ {true, "IGU_RX_IN_L2_PKT"},
+ {true, "IGU_RX_IN_L3_PKT"},
+ {true, "IGU_RX_IN_L4_PKT"},
+
+ {true, "IGU_RX_EL3E_PKT"},
+ {true, "IGU_RX_EL4E_PKT"},
+ {true, "IGU_RX_L3E_PKT"},
+ {true, "IGU_RX_L4E_PKT"},
+ {true, "IGU_RX_ROCEE_PKT"},
+ {true, "IGU_RX_OUT_UDP0_PKT"},
+
+ {true, "IGU_RX_IN_UDP0_PKT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "IGU_RX_OVERSIZE_PKT_L"},
+ {true, "IGU_RX_OVERSIZE_PKT_H"},
+ {true, "IGU_RX_UNDERSIZE_PKT_L"},
+ {true, "IGU_RX_UNDERSIZE_PKT_H"},
+ {true, "IGU_RX_OUT_ALL_PKT_L"},
+ {true, "IGU_RX_OUT_ALL_PKT_H"},
+
+ {true, "IGU_TX_OUT_ALL_PKT_L"},
+ {true, "IGU_TX_OUT_ALL_PKT_H"},
+ {true, "IGU_RX_UNI_PKT_L"},
+ {true, "IGU_RX_UNI_PKT_H"},
+ {true, "IGU_RX_MULTI_PKT_L"},
+ {true, "IGU_RX_MULTI_PKT_H"},
+
+ {true, "IGU_RX_BROAD_PKT_L"},
+ {true, "IGU_RX_BROAD_PKT_H"},
+ {true, "EGU_TX_OUT_ALL_PKT_L"},
+ {true, "EGU_TX_OUT_ALL_PKT_H"},
+ {true, "EGU_TX_UNI_PKT_L"},
+ {true, "EGU_TX_UNI_PKT_H"},
+
+ {true, "EGU_TX_MULTI_PKT_L"},
+ {true, "EGU_TX_MULTI_PKT_H"},
+ {true, "EGU_TX_BROAD_PKT_L"},
+ {true, "EGU_TX_BROAD_PKT_H"},
+ {true, "IGU_TX_KEY_NUM_L"},
+ {true, "IGU_TX_KEY_NUM_H"},
+
+ {true, "IGU_RX_NON_TUN_PKT_L"},
+ {true, "IGU_RX_NON_TUN_PKT_H"},
+ {true, "IGU_RX_TUN_PKT_L"},
+ {true, "IGU_RX_TUN_PKT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = {
+ {true, "tc_queue_num"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "RPU_RX_PKT_DROP_CNT"},
+ {true, "BUF_WAIT_TIMEOUT"},
+ {true, "BUF_WAIT_TIMEOUT_QID"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = {
+ {false, "Reserved"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+
+ {true, "FIFO_DFX_ST5"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = {
+ {false, "Reserved"},
+ {true, "NCSI_EGU_TX_FIFO_STS"},
+ {true, "NCSI_PAUSE_STATUS"},
+ {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_CKS_ERR_CNT"},
+
+ {true, "NCSI_RX_CTRL_PKT_CNT"},
+ {true, "NCSI_RX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_RX_PT_PKT_CNT"},
+ {true, "NCSI_RX_FCS_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"},
+
+ {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_CNT"},
+ {true, "NCSI_TX_PT_DMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_SMAC_ERR_CNT"},
+ {true, "NCSI_TX_PT_PKT_CNT"},
+ {true, "NCSI_TX_PT_PKT_TRUNC_CNT"},
+
+ {true, "NCSI_TX_PT_PKT_ERR_CNT"},
+ {true, "NCSI_TX_CTRL_PKT_ERR_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"},
+ {true, "NCSI_RX_CTRL_PKT_CFLIT_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "NCSI_MAC_RX_OCTETS_OK"},
+ {true, "NCSI_MAC_RX_OCTETS_BAD"},
+ {true, "NCSI_MAC_RX_UC_PKTS"},
+ {true, "NCSI_MAC_RX_MC_PKTS"},
+ {true, "NCSI_MAC_RX_BC_PKTS"},
+ {true, "NCSI_MAC_RX_PKTS_64OCTETS"},
+
+ {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"},
+ {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"},
+
+ {true, "NCSI_MAC_RX_FCS_ERRORS"},
+ {true, "NCSI_MAC_RX_LONG_ERRORS"},
+ {true, "NCSI_MAC_RX_JABBER_ERRORS"},
+ {true, "NCSI_MAC_RX_RUNT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_SHORT_ERR_CNT"},
+ {true, "NCSI_MAC_RX_FILT_PKT_CNT"},
+
+ {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"},
+ {true, "NCSI_MAC_TX_OCTETS_OK"},
+ {true, "NCSI_MAC_TX_OCTETS_BAD"},
+ {true, "NCSI_MAC_TX_UC_PKTS"},
+ {true, "NCSI_MAC_TX_MC_PKTS"},
+ {true, "NCSI_MAC_TX_BC_PKTS"},
+
+ {true, "NCSI_MAC_TX_PKTS_64OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"},
+ {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"},
+
+ {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"},
+ {true, "NCSI_MAC_TX_UNDERRUN"},
+ {true, "NCSI_MAC_TX_CRC_ERROR"},
+ {true, "NCSI_MAC_TX_PAUSE_FRAMES"},
+ {true, "NCSI_MAC_RX_PAD_PKTS"},
+ {true, "NCSI_MAC_RX_PAUSE_FRAMES"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = {
+ {false, "Reserved"},
+ {true, "LGE_IGU_AFIFO_DFX_0"},
+ {true, "LGE_IGU_AFIFO_DFX_1"},
+ {true, "LGE_IGU_AFIFO_DFX_2"},
+ {true, "LGE_IGU_AFIFO_DFX_3"},
+ {true, "LGE_IGU_AFIFO_DFX_4"},
+
+ {true, "LGE_IGU_AFIFO_DFX_5"},
+ {true, "LGE_IGU_AFIFO_DFX_6"},
+ {true, "LGE_IGU_AFIFO_DFX_7"},
+ {true, "LGE_EGU_AFIFO_DFX_0"},
+ {true, "LGE_EGU_AFIFO_DFX_1"},
+ {true, "LGE_EGU_AFIFO_DFX_2"},
+
+ {true, "LGE_EGU_AFIFO_DFX_3"},
+ {true, "LGE_EGU_AFIFO_DFX_4"},
+ {true, "LGE_EGU_AFIFO_DFX_5"},
+ {true, "LGE_EGU_AFIFO_DFX_6"},
+ {true, "LGE_EGU_AFIFO_DFX_7"},
+ {true, "CGE_IGU_AFIFO_DFX_0"},
+
+ {true, "CGE_IGU_AFIFO_DFX_1"},
+ {true, "CGE_EGU_AFIFO_DFX_0"},
+ {true, "CGE_EGU_AFIFO_DFX_1"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = {
+ {false, "Reserved"},
+ {true, "DROP_FROM_PRT_PKT_CNT"},
+ {true, "DROP_FROM_HOST_PKT_CNT"},
+ {true, "DROP_TX_VLAN_PROC_CNT"},
+ {true, "DROP_MNG_CNT"},
+ {true, "DROP_FD_CNT"},
+
+ {true, "DROP_NO_DST_CNT"},
+ {true, "DROP_MC_MBID_FULL_CNT"},
+ {true, "DROP_SC_FILTERED"},
+ {true, "PPP_MC_DROP_PKT_CNT"},
+ {true, "DROP_PT_CNT"},
+ {true, "DROP_MAC_ANTI_SPOOF_CNT"},
+
+ {true, "DROP_IG_VFV_CNT"},
+ {true, "DROP_IG_PRTV_CNT"},
+ {true, "DROP_CNM_PFC_PAUSE_CNT"},
+ {true, "DROP_TORUS_TC_CNT"},
+ {true, "DROP_TORUS_LPBK_CNT"},
+ {true, "PPP_HFS_STS"},
+
+ {true, "PPP_MC_RSLT_STS"},
+ {true, "PPP_P3U_STS"},
+ {true, "PPP_RSLT_DESCR_STS"},
+ {true, "PPP_UMV_STS_0"},
+ {true, "PPP_UMV_STS_1"},
+ {true, "PPP_VFV_STS"},
+
+ {true, "PPP_GRO_KEY_CNT"},
+ {true, "PPP_GRO_INFO_CNT"},
+ {true, "PPP_GRO_DROP_CNT"},
+ {true, "PPP_GRO_OUT_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_DATA_CNT"},
+ {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"},
+
+ {true, "PPP_GRO_INFO_MATCH_CNT"},
+ {true, "PPP_GRO_FREE_ENTRY_CNT"},
+ {true, "PPP_GRO_INNER_DFX_SIGNAL"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+
+ {true, "GET_RX_PKT_CNT_L"},
+ {true, "GET_RX_PKT_CNT_H"},
+ {true, "GET_TX_PKT_CNT_L"},
+ {true, "GET_TX_PKT_CNT_H"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2PRT_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2HOST_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2PRT_PKT_CNT_H"},
+
+ {true, "SEND_MC_FROM_PRT_CNT_L"},
+ {true, "SEND_MC_FROM_PRT_CNT_H"},
+ {true, "SEND_MC_FROM_HOST_CNT_L"},
+ {true, "SEND_MC_FROM_HOST_CNT_H"},
+ {true, "SSU_MC_RD_CNT_L"},
+ {true, "SSU_MC_RD_CNT_H"},
+
+ {true, "SSU_MC_DROP_CNT_L"},
+ {true, "SSU_MC_DROP_CNT_H"},
+ {true, "SSU_MC_RD_PKT_CNT_L"},
+ {true, "SSU_MC_RD_PKT_CNT_H"},
+ {true, "PPP_MC_2HOST_PKT_CNT_L"},
+ {true, "PPP_MC_2HOST_PKT_CNT_H"},
+
+ {true, "PPP_MC_2PRT_PKT_CNT_L"},
+ {true, "PPP_MC_2PRT_PKT_CNT_H"},
+ {true, "NTSNOS_PKT_CNT_L"},
+ {true, "NTSNOS_PKT_CNT_H"},
+ {true, "NTUP_PKT_CNT_L"},
+ {true, "NTUP_PKT_CNT_H"},
+
+ {true, "NTLCL_PKT_CNT_L"},
+ {true, "NTLCL_PKT_CNT_H"},
+ {true, "NTTGT_PKT_CNT_L"},
+ {true, "NTTGT_PKT_CNT_H"},
+ {true, "RTNS_PKT_CNT_L"},
+ {true, "RTNS_PKT_CNT_H"},
+
+ {true, "RTLPBK_PKT_CNT_L"},
+ {true, "RTLPBK_PKT_CNT_H"},
+ {true, "NR_PKT_CNT_L"},
+ {true, "NR_PKT_CNT_H"},
+ {true, "RR_PKT_CNT_L"},
+ {true, "RR_PKT_CNT_H"},
+
+ {true, "MNG_TBL_HIT_CNT_L"},
+ {true, "MNG_TBL_HIT_CNT_H"},
+ {true, "FD_TBL_HIT_CNT_L"},
+ {true, "FD_TBL_HIT_CNT_H"},
+ {true, "FD_LKUP_CNT_L"},
+ {true, "FD_LKUP_CNT_H"},
+
+ {true, "BC_HIT_CNT_L"},
+ {true, "BC_HIT_CNT_H"},
+ {true, "UM_TBL_UC_HIT_CNT_L"},
+ {true, "UM_TBL_UC_HIT_CNT_H"},
+ {true, "UM_TBL_MC_HIT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_CNT_H"},
+
+ {true, "UM_TBL_VMDQ1_HIT_CNT_L"},
+ {true, "UM_TBL_VMDQ1_HIT_CNT_H"},
+ {true, "MTA_TBL_HIT_CNT_L"},
+ {true, "MTA_TBL_HIT_CNT_H"},
+ {true, "FWD_BONDING_HIT_CNT_L"},
+ {true, "FWD_BONDING_HIT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_CNT_H"},
+ {true, "GET_TUNL_PKT_CNT_L"},
+ {true, "GET_TUNL_PKT_CNT_H"},
+ {true, "GET_BMC_PKT_CNT_L"},
+ {true, "GET_BMC_PKT_CNT_H"},
+
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_PRT2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_L"},
+ {true, "SEND_UC_HOST2BMC_PKT_CNT_H"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2HOST_PKT_CNT_H"},
+
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_L"},
+ {true, "SEND_UC_BMC2PRT_PKT_CNT_H"},
+ {true, "PPP_MC_2BMC_PKT_CNT_L"},
+ {true, "PPP_MC_2BMC_PKT_CNT_H"},
+ {true, "VLAN_MIRR_CNT_L"},
+ {true, "VLAN_MIRR_CNT_H"},
+
+ {true, "IG_MIRR_CNT_L"},
+ {true, "IG_MIRR_CNT_H"},
+ {true, "EG_MIRR_CNT_L"},
+ {true, "EG_MIRR_CNT_H"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_L"},
+ {true, "RX_DEFAULT_HOST_HIT_CNT_H"},
+
+ {true, "LAN_PAIR_CNT_L"},
+ {true, "LAN_PAIR_CNT_H"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_L"},
+ {true, "UM_TBL_MC_HIT_PKT_CNT_H"},
+ {true, "MTA_TBL_HIT_PKT_CNT_L"},
+ {true, "MTA_TBL_HIT_PKT_CNT_H"},
+
+ {true, "PROMIS_TBL_HIT_PKT_CNT_L"},
+ {true, "PROMIS_TBL_HIT_PKT_CNT_H"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = {
+ {false, "Reserved"},
+ {true, "FSM_DFX_ST0"},
+ {true, "FSM_DFX_ST1"},
+ {true, "FSM_DFX_ST2"},
+ {true, "FIFO_DFX_ST0"},
+ {true, "FIFO_DFX_ST1"},
+
+ {true, "FIFO_DFX_ST2"},
+ {true, "FIFO_DFX_ST3"},
+ {true, "FIFO_DFX_ST4"},
+ {true, "FIFO_DFX_ST5"},
+ {true, "FIFO_DFX_ST6"},
+ {true, "FIFO_DFX_ST7"},
+
+ {true, "FIFO_DFX_ST8"},
+ {true, "FIFO_DFX_ST9"},
+ {true, "FIFO_DFX_ST10"},
+ {true, "FIFO_DFX_ST11"},
+ {true, "Q_CREDIT_VLD_0"},
+ {true, "Q_CREDIT_VLD_1"},
+
+ {true, "Q_CREDIT_VLD_2"},
+ {true, "Q_CREDIT_VLD_3"},
+ {true, "Q_CREDIT_VLD_4"},
+ {true, "Q_CREDIT_VLD_5"},
+ {true, "Q_CREDIT_VLD_6"},
+ {true, "Q_CREDIT_VLD_7"},
+
+ {true, "Q_CREDIT_VLD_8"},
+ {true, "Q_CREDIT_VLD_9"},
+ {true, "Q_CREDIT_VLD_10"},
+ {true, "Q_CREDIT_VLD_11"},
+ {true, "Q_CREDIT_VLD_12"},
+ {true, "Q_CREDIT_VLD_13"},
+
+ {true, "Q_CREDIT_VLD_14"},
+ {true, "Q_CREDIT_VLD_15"},
+ {true, "Q_CREDIT_VLD_16"},
+ {true, "Q_CREDIT_VLD_17"},
+ {true, "Q_CREDIT_VLD_18"},
+ {true, "Q_CREDIT_VLD_19"},
+
+ {true, "Q_CREDIT_VLD_20"},
+ {true, "Q_CREDIT_VLD_21"},
+ {true, "Q_CREDIT_VLD_22"},
+ {true, "Q_CREDIT_VLD_23"},
+ {true, "Q_CREDIT_VLD_24"},
+ {true, "Q_CREDIT_VLD_25"},
+
+ {true, "Q_CREDIT_VLD_26"},
+ {true, "Q_CREDIT_VLD_27"},
+ {true, "Q_CREDIT_VLD_28"},
+ {true, "Q_CREDIT_VLD_29"},
+ {true, "Q_CREDIT_VLD_30"},
+ {true, "Q_CREDIT_VLD_31"},
+
+ {true, "GRO_BD_SERR_CNT"},
+ {true, "GRO_CONTEXT_SERR_CNT"},
+ {true, "RX_STASH_CFG_SERR_CNT"},
+ {true, "AXI_RD_FBD_SERR_CNT"},
+ {true, "GRO_BD_MERR_CNT"},
+ {true, "GRO_CONTEXT_MERR_CNT"},
+
+ {true, "RX_STASH_CFG_MERR_CNT"},
+ {true, "AXI_RD_FBD_MERR_CNT"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+ {false, "Reserved"},
+};
+
+static const struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = {
+ {true, "q_num"},
+ {true, "RCB_CFG_RX_RING_TAIL"},
+ {true, "RCB_CFG_RX_RING_HEAD"},
+ {true, "RCB_CFG_RX_RING_FBDNUM"},
+ {true, "RCB_CFG_RX_RING_OFFSET"},
+ {true, "RCB_CFG_RX_RING_FBDOFFSET"},
+
+ {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"},
+ {true, "RCB_CFG_TX_RING_TAIL"},
+ {true, "RCB_CFG_TX_RING_HEAD"},
+ {true, "RCB_CFG_TX_RING_FBDNUM"},
+ {true, "RCB_CFG_TX_RING_OFFSET"},
+ {true, "RCB_CFG_TX_RING_EBDNUM"},
+};
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
new file mode 100644
index 000000000..3226ca176
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c
@@ -0,0 +1,2009 @@
+// SPDX-License-Identifier: GPL-2.0+
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#include "hclge_err.h"
+
+static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = {
+ { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = {
+ { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = {
+ { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = {
+ { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_int[] = {
+ { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = {
+ { .int_msk = BIT(0), .msg = "rx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "tx_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tx_buf_underrun",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ncsi_err_int[] = {
+ { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = {
+ { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = {
+ { .int_msk = BIT(0), .msg = "tx_vlan_tag_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = {
+ { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_tm_sch_rint[] = {
+ { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_fifo_rint[] = {
+ { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_qcn_ecc_rint[] = {
+ { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = {
+ { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = {
+ { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(26), .msg = "rd_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(27), .msg = "wr_bus_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(28), .msg = "reg_search_miss",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(29), .msg = "rx_q_search_miss",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = {
+ { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = {
+ { .int_msk = BIT(0), .msg = "over_8bd_no_fe",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(5), .msg = "buf_wait_timeout",
+ .reset_level = HNAE3_NONE_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_com_err_int[] = {
+ { .int_msk = BIT(0), .msg = "buf_sum_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(1), .msg = "ppp_mb_num_err",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(2), .msg = "ppp_mbid_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "ppp_rlt_host_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "cks_edit_position_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "cks_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "vlan_edit_condition_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "vlan_num_ot_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "vlan_num_in_err",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+#define HCLGE_SSU_MEM_ECC_ERR(x) \
+ { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \
+ .reset_level = HNAE3_GLOBAL_RESET }
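/*
 * For reference (expansion sketch, not part of the patch itself): the "#x"
 * in the macro above stringizes the bit index into the message, so
 * HCLGE_SSU_MEM_ECC_ERR(0) expands to the equivalent of
 *
 *	{ .int_msk = BIT(0), .msg = "ssu_mem0_ecc_mbit_err",
 *	  .reset_level = HNAE3_GLOBAL_RESET }
 *
 * which is why the table below can cover ssu_mem0..ssu_mem31 without
 * repeating the initializer by hand.
 */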
+
+static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = {
+ HCLGE_SSU_MEM_ECC_ERR(0),
+ HCLGE_SSU_MEM_ECC_ERR(1),
+ HCLGE_SSU_MEM_ECC_ERR(2),
+ HCLGE_SSU_MEM_ECC_ERR(3),
+ HCLGE_SSU_MEM_ECC_ERR(4),
+ HCLGE_SSU_MEM_ECC_ERR(5),
+ HCLGE_SSU_MEM_ECC_ERR(6),
+ HCLGE_SSU_MEM_ECC_ERR(7),
+ HCLGE_SSU_MEM_ECC_ERR(8),
+ HCLGE_SSU_MEM_ECC_ERR(9),
+ HCLGE_SSU_MEM_ECC_ERR(10),
+ HCLGE_SSU_MEM_ECC_ERR(11),
+ HCLGE_SSU_MEM_ECC_ERR(12),
+ HCLGE_SSU_MEM_ECC_ERR(13),
+ HCLGE_SSU_MEM_ECC_ERR(14),
+ HCLGE_SSU_MEM_ECC_ERR(15),
+ HCLGE_SSU_MEM_ECC_ERR(16),
+ HCLGE_SSU_MEM_ECC_ERR(17),
+ HCLGE_SSU_MEM_ECC_ERR(18),
+ HCLGE_SSU_MEM_ECC_ERR(19),
+ HCLGE_SSU_MEM_ECC_ERR(20),
+ HCLGE_SSU_MEM_ECC_ERR(21),
+ HCLGE_SSU_MEM_ECC_ERR(22),
+ HCLGE_SSU_MEM_ECC_ERR(23),
+ HCLGE_SSU_MEM_ECC_ERR(24),
+ HCLGE_SSU_MEM_ECC_ERR(25),
+ HCLGE_SSU_MEM_ECC_ERR(26),
+ HCLGE_SSU_MEM_ECC_ERR(27),
+ HCLGE_SSU_MEM_ECC_ERR(28),
+ HCLGE_SSU_MEM_ECC_ERR(29),
+ HCLGE_SSU_MEM_ECC_ERR(30),
+ HCLGE_SSU_MEM_ECC_ERR(31),
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = {
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = {
+ { .int_msk = BIT(0), .msg = "ig_mac_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "ig_host_inf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "ig_roc_buf_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(9), .msg = "qm_eof_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(15), .msg = "host_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = {
+ { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = {
+ { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port",
+ .reset_level = HNAE3_FUNC_RESET },
+ { .int_msk = BIT(9), .msg = "low_water_line_err_port",
+ .reset_level = HNAE3_NONE_RESET },
+ { .int_msk = BIT(10), .msg = "hi_water_line_err_port",
+ .reset_level = HNAE3_GLOBAL_RESET },
+ { /* sentinel */ }
+};
+
+static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = {
+ { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" },
+ { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" },
+ { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" },
+ { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" },
+ { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" },
+ { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" },
+ { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" },
+ { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" },
+ { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" },
+ { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" },
+ { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" },
+ { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" },
+ { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" },
+ { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" },
+ { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" },
+ { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" },
+ { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" },
+ { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" },
+ { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" },
+ { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" },
+ { /* sentinel */ }
+};
+
+static void hclge_log_error(struct device *dev, char *reg,
+ const struct hclge_hw_error *err,
+ u32 err_sts, unsigned long *reset_requests)
+{
+ while (err->msg) {
+ if (err->int_msk & err_sts) {
+ dev_err(dev, "%s %s found [error status=0x%x]\n",
+ reg, err->msg, err_sts);
+ if (err->reset_level &&
+ err->reset_level != HNAE3_NONE_RESET)
+ set_bit(err->reset_level, reset_requests);
+ }
+ err++;
+ }
+}
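/*
 * Minimal usage sketch for hclge_log_error() (hypothetical status value,
 * illustration only, not part of the patch): each table above is walked
 * until the empty sentinel entry, every set bit that has a message is
 * logged, and its reset level (if any) is accumulated into the caller's
 * reset_requests bitmap.
 *
 *	unsigned long reset_requests = 0;
 *	u32 status = BIT(0) | BIT(2);	// pretend IGU_INT_STS readout
 *
 *	hclge_log_error(dev, "IGU_INT_STS", &hclge_igu_int[0],
 *			status, &reset_requests);
 *	// both igu_rx_buf0 and igu_rx_buf1 entries match, so
 *	// HNAE3_GLOBAL_RESET is now set in reset_requests.
 */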
+
+/* hclge_cmd_query_error: read the error information
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @cmd: command opcode
+ * @flag: flag for extended command structure
+ *
+ * This function queries the error info from the hw register(s) using the command interface
+ */
+static int hclge_cmd_query_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, u32 cmd, u16 flag)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int desc_num = 1;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+ if (flag) {
+ desc[0].flag |= cpu_to_le16(flag);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+ desc_num = 2;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
+ if (ret)
+ dev_err(dev, "query error cmd failed (%d)\n", ret);
+
+ return ret;
+}
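/*
 * Usage sketch for hclge_cmd_query_error() (illustrative call sites; the
 * second opcode is a placeholder, not a real command): a zero flag reads a
 * single descriptor, while a non-zero flag chains a second descriptor so a
 * wider status block can be read back in one command.
 *
 *	struct hclge_desc desc[2];
 *	int ret;
 *
 *	// single-descriptor query (as done for the TM QCN config below)
 *	ret = hclge_cmd_query_error(hdev, desc,
 *				    HCLGE_TM_QCN_MEM_INT_CFG, 0);
 *
 *	// two chained descriptors: the flag is OR'ed into desc[0].flag
 *	ret = hclge_cmd_query_error(hdev, desc, SOME_QUERY_OPCODE,
 *				    HCLGE_CMD_FLAG_NEXT);
 */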
+
+static int hclge_clear_mac_tnl_int(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_MAC_TNL_INT, false);
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_CLR);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* configure common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false);
+
+ if (en) {
+ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN);
+ desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN);
+ desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN);
+ desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN);
+ }
+
+ desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK |
+ HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK);
+ desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK);
+ desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK |
+ HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK);
+ desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure common err interrupts\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return 0;
+
+ /* configure NCSI error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_NCSI_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure NCSI error interrupts\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* configure IGU,EGU error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false);
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_TYPE);
+ if (en)
+ desc.data[0] |= cpu_to_le32(HCLGE_IGU_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "fail(%d) to configure IGU common interrupts\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev,
+ "fail(%d) to configure IGU-EGU TNL interrupts\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_ncsi_hw_err_int(hdev, en);
+
+ return ret;
+}
+
+static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* configure PPP error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+
+ if (cmd == HCLGE_PPP_CMD0_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN);
+ }
+
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK);
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK);
+ } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) {
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN);
+ }
+
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev, "fail(%d) to configure PPP error intr\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ int ret;
+
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD,
+ en);
+ if (ret)
+ return ret;
+
+ ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD,
+ en);
+
+ return ret;
+}
+
+static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* configure TM SCH hw errors */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret);
+ return ret;
+ }
+
+ /* configure TM QCN hw errors */
+ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0);
+ if (ret) {
+ dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret);
+ return ret;
+ }
+
+ hclge_cmd_reuse_desc(&desc, false);
+ if (en)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure TM QCN mem errors\n", ret);
+
+ return ret;
+}
+
+static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ /* configure MAC common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN);
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure MAC COMMON error intr\n", ret);
+
+ return ret;
+}
+
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_TNL_INT_EN, false);
+ if (en)
+ desc.data[0] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN);
+ else
+ desc.data[0] = 0;
+
+ desc.data[1] = cpu_to_le32(HCLGE_MAC_TNL_INT_EN_MASK);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
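/*
 * Note on the enable/mask convention shared by the hclge_config_*_int
 * helpers above (an observation on the code, not new behaviour): the
 * interrupt-enable words in desc.data[] are only filled in when en is
 * true, whereas the corresponding *_EN_MASK words are written
 * unconditionally, so a disable request still programs the mask. E.g.:
 *
 *	hclge_config_mac_tnl_int(hdev, true);	// enable MAC TNL interrupts
 *	hclge_config_mac_tnl_int(hdev, false);	// disable them again
 */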
+
+static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd,
+ bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int desc_num = 1;
+ int ret;
+
+ /* configure PPU error interrupts */
+ if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], cmd, false);
+ if (en) {
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN);
+ desc[1].data[3] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN);
+ desc[1].data[4] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN);
+ }
+
+ desc[1].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK);
+ desc[1].data[1] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK);
+ desc[1].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK);
+ desc[1].data[3] |=
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK);
+ desc_num = 2;
+ } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2);
+
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK);
+ } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) {
+ hclge_cmd_setup_basic_desc(&desc[0], cmd, false);
+ if (en)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN);
+
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK);
+ } else {
+ dev_err(dev, "Invalid cmd to configure PPU error interrupts\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], desc_num);
+
+ return ret;
+}
+
+static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ int ret;
+
+ ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n",
+ ret);
+ return ret;
+ }
+
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_MPF_OTHER_INT_CMD,
+ en);
+ if (ret) {
+ dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_ppu_error_interrupts(hdev,
+ HCLGE_PPU_PF_OTHER_INT_CMD, en);
+ if (ret)
+ dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n",
+ ret);
+ return ret;
+}
+
+static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* configure SSU ecc error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false);
+ if (en) {
+ desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN);
+ desc[0].data[1] =
+ cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN);
+ desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN);
+ }
+
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK);
+ desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret) {
+ dev_err(dev,
+ "fail(%d) to configure SSU ECC error interrupt\n", ret);
+ return ret;
+ }
+
+ /* configure SSU common error interrupts */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false);
+
+ if (en) {
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN);
+ else
+ desc[0].data[0] =
+ cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5));
+ desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN);
+ desc[0].data[2] =
+ cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN);
+ }
+
+ desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK |
+ HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK);
+ desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 2);
+ if (ret)
+ dev_err(dev,
+ "fail(%d) to configure SSU COMMON error intr\n", ret);
+
+ return ret;
+}
+
+/* hclge_query_bd_num: query number of buffer descriptors
+ * @hdev: pointer to struct hclge_dev
+ * @is_ras: true for ras, false for msix
+ * @mpf_bd_num: number of main PF interrupt buffer descriptors
+ * @pf_bd_num: number of non-main PF interrupt buffer descriptors
+ *
+ * This function queries the number of mpf and pf buffer descriptors.
+ */
+static int hclge_query_bd_num(struct hclge_dev *hdev, bool is_ras,
+ int *mpf_bd_num, int *pf_bd_num)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_min_bd_num, pf_min_bd_num;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc_bd;
+ int ret;
+
+ if (is_ras) {
+ opcode = HCLGE_QUERY_RAS_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_RAS_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_RAS_INT_MIN_BD_NUM;
+ } else {
+ opcode = HCLGE_QUERY_MSIX_INT_STS_BD_NUM;
+ mpf_min_bd_num = HCLGE_MPF_MSIX_INT_MIN_BD_NUM;
+ pf_min_bd_num = HCLGE_PF_MSIX_INT_MIN_BD_NUM;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc_bd, opcode, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query msix int status bd num\n",
+ ret);
+ return ret;
+ }
+
+ *mpf_bd_num = le32_to_cpu(desc_bd.data[0]);
+ *pf_bd_num = le32_to_cpu(desc_bd.data[1]);
+ if (*mpf_bd_num < mpf_min_bd_num || *pf_bd_num < pf_min_bd_num) {
+ dev_err(dev, "Invalid bd num: mpf(%d), pf(%d)\n",
+ *mpf_bd_num, *pf_bd_num);
+ return -EINVAL;
+ }
+
+ return 0;
+}
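/*
 * Illustrative caller pattern for hclge_query_bd_num() (a sketch that
 * mirrors hclge_handle_all_ras_errors() further down, shown here only to
 * make the contract concrete): the larger of the two counts sizes one
 * descriptor buffer that is then reused for both the mpf and the pf query.
 *
 *	int mpf_bd_num, pf_bd_num;
 *	u32 bd_num;
 *	struct hclge_desc *desc;
 *
 *	if (hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num))
 *		return;
 *	bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
 *	desc = kcalloc(bd_num, sizeof(*desc), GFP_KERNEL);
 */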
+
+/* hclge_handle_mpf_ras_error: handle all main PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the main PF RAS errors in the
+ * hw register(s) using the command interface.
+ */
+static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all main PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret) {
+ dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log HNS common errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status)
+ hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ &hclge_imp_tcm_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status)
+ hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS",
+ &hclge_cmdq_nic_mem_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ if ((le32_to_cpu(desc[0].data[2])) & BIT(0))
+ dev_warn(dev, "imp_rd_data_poison_err found\n");
+
+ status = le32_to_cpu(desc[0].data[3]);
+ if (status)
+ hclge_log_error(dev, "TQP_INT_ECC_INT_STS",
+ &hclge_tqp_int_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[4]);
+ if (status)
+ hclge_log_error(dev, "MSIX_ECC_INT_STS",
+ &hclge_msix_sram_ecc_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log SSU(Storage Switch Unit) errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status)
+ hclge_log_error(dev, "SSU_ECC_MULTI_BIT_INT_0",
+ &hclge_ssu_mem_ecc_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 3)) & BIT(0);
+ if (status) {
+ dev_err(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_mem32_ecc_mbit_err found [error status=0x%x]\n",
+ status);
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
+ }
+
+ status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "SSU_COMMON_ERR_INT",
+ &hclge_ssu_com_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log IGU(Ingress Unit) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_INT_STS",
+ &hclge_igu_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log PPP(Programmable Packet Process) errors */
+ desc_data = (__le32 *)&desc[4];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1",
+ &hclge_ppp_mpf_abnormal_int_st1[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK;
+ if (status)
+ hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppp_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 1));
+ if (status) {
+ dev_err(dev,
+ "PPU_MPF_ABNORMAL_INT_ST1 rpu_rx_pkt_ecc_mbit_err found\n");
+ set_bit(HNAE3_GLOBAL_RESET, &ae_dev->hw_err_reset_req);
+ }
+
+ status = le32_to_cpu(*(desc_data + 2));
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2",
+ &hclge_ppu_mpf_abnormal_int_st2[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3",
+ &hclge_ppu_mpf_abnormal_int_st3[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log TM(Traffic Manager) errors */
+ desc_data = (__le32 *)&desc[6];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "TM_SCH_RINT",
+ &hclge_tm_sch_rint[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log QCN(Quantized Congestion Control) errors */
+ desc_data = (__le32 *)&desc[7];
+ status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "QCN_FIFO_RINT",
+ &hclge_qcn_fifo_rint[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "QCN_ECC_RINT",
+ &hclge_qcn_ecc_rint[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log NCSI errors */
+ desc_data = (__le32 *)&desc[9];
+ status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "NCSI_ECC_INT_RPT",
+ &hclge_ncsi_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* clear all main PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_handle_pf_ras_error: handle all PF RAS errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @num: number of extended command structures
+ *
+ * This function handles all the PF RAS errors in the
+ * hw register(s) using the command interface.
+ */
+static int hclge_handle_pf_ras_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int num)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all PF RAS errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret) {
+ dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log SSU(Storage Switch Unit) errors */
+ status = le32_to_cpu(desc[0].data[0]);
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_err_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[1]);
+ if (status)
+ hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT",
+ &hclge_ssu_fifo_overflow_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ status = le32_to_cpu(desc[0].data[2]);
+ if (status)
+ hclge_log_error(dev, "SSU_ETS_TCG_INT",
+ &hclge_ssu_ets_tcg_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK;
+ if (status)
+ hclge_log_error(dev, "IGU_EGU_TNL_INT_STS",
+ &hclge_igu_egu_tnl_int[0], status,
+ &ae_dev->hw_err_reset_req);
+
+ /* log PPU(RCB) errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_RAS_MASK;
+ if (status) {
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST0",
+ &hclge_ppu_pf_abnormal_int[0], status,
+ &ae_dev->hw_err_reset_req);
+ hclge_report_hw_error(hdev, HNAE3_PPU_POISON_ERROR);
+ }
+
+ /* clear all PF RAS errors */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], num);
+ if (ret)
+ dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_handle_all_ras_errors(struct hclge_dev *hdev)
+{
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ int ret;
+
+ /* query the number of registers in the RAS int status */
+ ret = hclge_query_bd_num(hdev, true, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ return ret;
+
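+ /* the same descriptor buffer is reused for the MPF and the PF query,
+ * so size it for the larger of the two BD counts
+ */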
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ /* handle all main PF RAS errors */
+ ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+
+ /* handle all PF RAS errors */
+ ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num);
+ kfree(desc);
+
+ return ret;
+}
+
+static int hclge_log_rocee_axi_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_QUERY_ROCEE_AXI_RAS_INFO_CMD,
+ true);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 3);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE AXI error sts\n", ret);
+ return ret;
+ }
+
+ dev_err(dev, "AXI1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "AXI2: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[1].data[0]), le32_to_cpu(desc[1].data[1]),
+ le32_to_cpu(desc[1].data[2]), le32_to_cpu(desc[1].data[3]),
+ le32_to_cpu(desc[1].data[4]), le32_to_cpu(desc[1].data[5]));
+ dev_err(dev, "AXI3: %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[2].data[0]), le32_to_cpu(desc[2].data[1]),
+ le32_to_cpu(desc[2].data[2]), le32_to_cpu(desc[2].data[3]));
+
+ return 0;
+}
+
+static int hclge_log_rocee_ecc_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_ROCEE_ECC_RAS_INFO_CMD,
+ HCLGE_CMD_FLAG_NEXT);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE ECC error sts\n", ret);
+ return ret;
+ }
+
+ dev_err(dev, "ECC1: %08X %08X %08X %08X %08X %08X\n",
+ le32_to_cpu(desc[0].data[0]), le32_to_cpu(desc[0].data[1]),
+ le32_to_cpu(desc[0].data[2]), le32_to_cpu(desc[0].data[3]),
+ le32_to_cpu(desc[0].data[4]), le32_to_cpu(desc[0].data[5]));
+ dev_err(dev, "ECC2: %08X %08X %08X\n", le32_to_cpu(desc[1].data[0]),
+ le32_to_cpu(desc[1].data[1]), le32_to_cpu(desc[1].data[2]));
+
+ return 0;
+}
+
+static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ int ret;
+
+ /* read overflow error status */
+ ret = hclge_cmd_query_error(hdev, &desc[0], HCLGE_ROCEE_PF_RAS_INT_CMD,
+ 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret);
+ return ret;
+ }
+
+ /* log overflow error */
+ if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ const struct hclge_hw_error *err;
+ u32 err_sts;
+
+ err = &hclge_rocee_qmm_ovf_err_int[0];
+ err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK &
+ le32_to_cpu(desc[0].data[0]);
+ while (err->msg) {
+ if (err->int_msk == err_sts) {
+ dev_err(dev, "%s [error status=0x%x] found\n",
+ err->msg,
+ le32_to_cpu(desc[0].data[0]));
+ break;
+ }
+ err++;
+ }
+ }
+
+ if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_err(dev, "ROCEE TSP OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[1]));
+ }
+
+ if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) {
+ dev_err(dev, "ROCEE SCC OVF [error status=0x%x] found\n",
+ le32_to_cpu(desc[0].data[2]));
+ }
+
+ return 0;
+}
+
+static enum hnae3_reset_type
+hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev)
+{
+ enum hnae3_reset_type reset_type = HNAE3_NONE_RESET;
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc[2];
+ unsigned int status;
+ int ret;
+
+ /* read RAS error interrupt status */
+ ret = hclge_cmd_query_error(hdev, &desc[0],
+ HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, 0);
+ if (ret) {
+ dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret);
+ /* reset everything for now */
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ status = le32_to_cpu(desc[0].data[0]);
+
+ if (status & HCLGE_ROCEE_AXI_ERR_INT_MASK) {
+ if (status & HCLGE_ROCEE_RERR_INT_MASK)
+ dev_err(dev, "ROCEE RAS AXI rresp error\n");
+
+ if (status & HCLGE_ROCEE_BERR_INT_MASK)
+ dev_err(dev, "ROCEE RAS AXI bresp error\n");
+
+ reset_type = HNAE3_FUNC_RESET;
+
+ hclge_report_hw_error(hdev, HNAE3_ROCEE_AXI_RESP_ERROR);
+
+ ret = hclge_log_rocee_axi_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ if (status & HCLGE_ROCEE_ECC_INT_MASK) {
+ dev_err(dev, "ROCEE RAS 2bit ECC error\n");
+ reset_type = HNAE3_GLOBAL_RESET;
+
+ ret = hclge_log_rocee_ecc_error(hdev);
+ if (ret)
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ if (status & HCLGE_ROCEE_OVF_INT_MASK) {
+ ret = hclge_log_rocee_ovf_error(hdev);
+ if (ret) {
+ dev_err(dev, "failed(%d) to process ovf error\n", ret);
+ /* reset everything for now */
+ return HNAE3_GLOBAL_RESET;
+ }
+ }
+
+ /* clear error status */
+ hclge_cmd_reuse_desc(&desc[0], false);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ if (ret) {
+ dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret);
+ /* reset everything for now */
+ return HNAE3_GLOBAL_RESET;
+ }
+
+ return reset_type;
+}
+
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en)
+{
+ struct device *dev = &hdev->pdev->dev;
+ struct hclge_desc desc;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2 ||
+ !hnae3_dev_roce_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false);
+ if (en) {
+ /* enable ROCEE hw error interrupts */
+ desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN);
+ desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN);
+
+ hclge_log_and_clear_rocee_ras_error(hdev);
+ }
+ desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK);
+ desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret);
+
+ return ret;
+}
+
+static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ enum hnae3_reset_type reset_type;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return;
+
+ reset_type = hclge_log_and_clear_rocee_ras_error(hdev);
+ if (reset_type != HNAE3_NONE_RESET)
+ set_bit(reset_type, &ae_dev->hw_err_reset_req);
+}
+
+static const struct hclge_hw_blk hw_blk[] = {
+ {
+ .msk = BIT(0), .name = "IGU_EGU",
+ .config_err_int = hclge_config_igu_egu_hw_err_int,
+ },
+ {
+ .msk = BIT(1), .name = "PPP",
+ .config_err_int = hclge_config_ppp_hw_err_int,
+ },
+ {
+ .msk = BIT(2), .name = "SSU",
+ .config_err_int = hclge_config_ssu_hw_err_int,
+ },
+ {
+ .msk = BIT(3), .name = "PPU",
+ .config_err_int = hclge_config_ppu_hw_err_int,
+ },
+ {
+ .msk = BIT(4), .name = "TM",
+ .config_err_int = hclge_config_tm_hw_err_int,
+ },
+ {
+ .msk = BIT(5), .name = "COMMON",
+ .config_err_int = hclge_config_common_hw_err_int,
+ },
+ {
+ .msk = BIT(8), .name = "MAC",
+ .config_err_int = hclge_config_mac_err_int,
+ },
+ { /* sentinel */ }
+};
+
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state)
+{
+ const struct hclge_hw_blk *module = hw_blk;
+ int ret = 0;
+
+ while (module->name) {
+ if (module->config_err_int) {
+ ret = module->config_err_int(hdev, state);
+ if (ret)
+ return ret;
+ }
+ module++;
+ }
+
+ return ret;
+}
+
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 status;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't recover - RAS error reported during dev init\n");
+ return PCI_ERS_RESULT_NONE;
+ }
+
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
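+ /* clear the recorded reset requests only when a non-fatal NIC error or
+ * a RoCEE error is actually pending; otherwise there is nothing to do
+ */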
+ if (status & HCLGE_RAS_REG_NFE_MASK ||
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK)
+ ae_dev->hw_err_reset_req = 0;
+ else
+ goto out;
+
+ /* Handling Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_err(dev,
+ "HNS Non-Fatal RAS error(status=0x%x) identified\n",
+ status);
+ hclge_handle_all_ras_errors(hdev);
+ }
+
+ /* Handling Non-fatal Rocee RAS errors */
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 &&
+ status & HCLGE_RAS_REG_ROCEE_ERR_MASK) {
+ dev_err(dev, "ROCEE Non-Fatal RAS error identified\n");
+ hclge_handle_rocee_ras_error(ae_dev);
+ }
+
+ if (ae_dev->hw_err_reset_req)
+ return PCI_ERS_RESULT_NEED_RESET;
+
+out:
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+static int hclge_clear_hw_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc, bool is_mpf,
+ u32 bd_num)
+{
+ if (is_mpf)
+ desc[0].opcode =
+ cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT);
+ else
+ desc[0].opcode = cpu_to_le16(HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT);
+
+ desc[0].flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
+
+ return hclge_cmd_send(&hdev->hw, &desc[0], bd_num);
+}
+
+/* hclge_query_over_8bd_err_info: query information about over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @vf_id: Index of the virtual function with error
+ * @q_id: Physical index of the queue with error
+ *
+ * This function gets the specific index of the queue and function which
+ * caused over_8bd_nfe_err by using command. If vf_id is 0, it means the
+ * error was caused by the PF instead of a VF.
+ */
+static int hclge_query_over_8bd_err_info(struct hclge_dev *hdev, u16 *vf_id,
+ u16 *q_id)
+{
+ struct hclge_query_ppu_pf_other_int_dfx_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PPU_PF_OTHER_INT_DFX, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ req = (struct hclge_query_ppu_pf_other_int_dfx_cmd *)desc.data;
+ *vf_id = le16_to_cpu(req->over_8bd_no_fe_vf_id);
+ *q_id = le16_to_cpu(req->over_8bd_no_fe_qid);
+
+ return 0;
+}
+
+/* hclge_handle_over_8bd_err: handle MSI-X error named over_8bd_nfe_err
+ * @hdev: pointer to struct hclge_dev
+ * @reset_requests: reset level that we need to trigger later
+ *
+ * over_8bd_nfe_err is a special MSI-X error because it may be caused by a VF.
+ * In that case, we need to trigger a VF reset. Otherwise, a PF reset is needed.
+ */
+static void hclge_handle_over_8bd_err(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ u16 vf_id;
+ u16 q_id;
+ int ret;
+
+ ret = hclge_query_over_8bd_err_info(hdev, &vf_id, &q_id);
+ if (ret) {
+ dev_err(dev, "fail(%d) to query over_8bd_no_fe info\n",
+ ret);
+ return;
+ }
+
+ dev_err(dev, "PPU_PF_ABNORMAL_INT_ST over_8bd_no_fe found, vf_id(%u), queue_id(%u)\n",
+ vf_id, q_id);
+
+ if (vf_id) {
+ if (vf_id >= hdev->num_alloc_vport) {
+ dev_err(dev, "invalid vf id(%u)\n", vf_id);
+ return;
+ }
+
+ /* If we need to trigger other reset whose level is higher
+ * than HNAE3_VF_FUNC_RESET, no need to trigger a VF reset
+ * here.
+ */
+ if (*reset_requests != 0)
+ return;
+
+ ret = hclge_inform_reset_assert_to_vf(&hdev->vport[vf_id]);
+ if (ret)
+ dev_err(dev, "inform reset to vf(%u) failed %d!\n",
+ hdev->vport->vport_id, ret);
+ } else {
+ set_bit(HNAE3_FUNC_RESET, reset_requests);
+ }
+}
+
+/* hclge_handle_mpf_msix_error: handle all main PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @mpf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the main PF MSI-X errors in the hw registers
+ * using the command interface.
+ */
+static int hclge_handle_mpf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int mpf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+ /* query all main PF MSIx errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all mpf msix int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log MAC errors */
+ desc_data = (__le32 *)&desc[1];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R",
+ &hclge_mac_afifo_tnl_int[0], status,
+ reset_requests);
+
+ /* log PPU(RCB) MPF errors */
+ desc_data = (__le32 *)&desc[5];
+ status = le32_to_cpu(*(desc_data + 2)) &
+ HCLGE_PPU_MPF_INT_ST2_MSIX_MASK;
+ if (status)
+ dev_err(dev, "PPU_MPF_ABNORMAL_INT_ST2 rx_q_search_miss found [dfx status=0x%x\n]",
+ status);
+
+ /* clear all main PF MSIx errors */
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+/* hclge_handle_pf_msix_error: handle all PF MSI-X errors
+ * @hdev: pointer to struct hclge_dev
+ * @desc: descriptor for describing the command
+ * @pf_bd_num: number of extended command structures
+ * @reset_requests: record of the reset level that we need
+ *
+ * This function handles all the PF MSI-X errors in the hw registers using
+ * the command interface.
+ */
+static int hclge_handle_pf_msix_error(struct hclge_dev *hdev,
+ struct hclge_desc *desc,
+ int pf_bd_num,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+ __le32 *desc_data;
+ u32 status;
+ int ret;
+
+ /* query all PF MSIx errors */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num);
+ if (ret) {
+ dev_err(dev, "query all pf msix int cmd failed (%d)\n", ret);
+ return ret;
+ }
+
+ /* log SSU PF errors */
+ status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK;
+ if (status)
+ hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT",
+ &hclge_ssu_port_based_pf_int[0],
+ status, reset_requests);
+
+ /* read and log PPP PF errors */
+ desc_data = (__le32 *)&desc[2];
+ status = le32_to_cpu(*desc_data);
+ if (status)
+ hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0",
+ &hclge_ppp_pf_abnormal_int[0],
+ status, reset_requests);
+
+ /* log PPU(RCB) PF errors */
+ desc_data = (__le32 *)&desc[3];
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK;
+ if (status)
+ hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST",
+ &hclge_ppu_pf_abnormal_int[0],
+ status, reset_requests);
+
+ status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_OVER_8BD_ERR_MASK;
+ if (status)
+ hclge_handle_over_8bd_err(hdev, reset_requests);
+
+ /* clear all PF MSIx errors */
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret)
+ dev_err(dev, "clear all pf msix int cmd failed (%d)\n", ret);
+
+ return ret;
+}
+
+static int hclge_handle_all_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct hclge_mac_tnl_stats mac_tnl_stats;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ goto out;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ ret = hclge_handle_mpf_msix_error(hdev, desc, mpf_bd_num,
+ reset_requests);
+ if (ret)
+ goto msi_error;
+
+ memset(desc, 0, bd_num * sizeof(struct hclge_desc));
+ ret = hclge_handle_pf_msix_error(hdev, desc, pf_bd_num, reset_requests);
+ if (ret)
+ goto msi_error;
+
+ /* query and clear mac tnl interruptions */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_MAC_TNL_INT,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc[0], 1);
+ if (ret) {
+ dev_err(dev, "query mac tnl int cmd failed (%d)\n", ret);
+ goto msi_error;
+ }
+
+ status = le32_to_cpu(desc->data[0]);
+ if (status) {
+ /* When mac tnl interrupt occurs, we record current time and
+ * register status here in a fifo, then clear the status. So
+ * that if link status changes suddenly at some time, we can
+ * query them by debugfs.
+ */
+ mac_tnl_stats.time = local_clock();
+ mac_tnl_stats.status = status;
+ kfifo_put(&hdev->mac_tnl_log, mac_tnl_stats);
+ ret = hclge_clear_mac_tnl_int(hdev);
+ if (ret)
+ dev_err(dev, "clear mac tnl int failed (%d)\n", ret);
+ }
+
+msi_error:
+ kfree(desc);
+out:
+ return ret;
+}
+
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ if (!test_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state)) {
+ dev_err(dev,
+ "Can't handle - MSIx error reported during dev init\n");
+ return 0;
+ }
+
+ return hclge_handle_all_hw_msix_error(hdev, reset_requests);
+}
+
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_DESC_NO_DATA_LEN 8
+
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct device *dev = &hdev->pdev->dev;
+ u32 mpf_bd_num, pf_bd_num, bd_num;
+ struct hclge_desc *desc;
+ u32 status;
+ int ret;
+
+ ae_dev->hw_err_reset_req = 0;
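+ /* latch the RAS status here so that any error already pending can
+ * still be handled after the MSI-X sources are cleared below
+ */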
+ status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG);
+
+ /* query the number of bds for the MSIx int status */
+ ret = hclge_query_bd_num(hdev, false, &mpf_bd_num, &pf_bd_num);
+ if (ret)
+ return;
+
+ bd_num = max_t(u32, mpf_bd_num, pf_bd_num);
+ desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return;
+
+ /* Clear HNS hw errors reported through msix */
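+ /* writing an all-ones status back makes the clear command reset every
+ * error source; only the 8-byte command header of the first descriptor
+ * is left untouched by the fill
+ */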
+ memset(&desc[0].data[0], 0xFF, mpf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, true, mpf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear mpf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ memset(&desc[0].data[0], 0xFF, pf_bd_num * sizeof(struct hclge_desc) -
+ HCLGE_DESC_NO_DATA_LEN);
+ ret = hclge_clear_hw_msix_error(hdev, desc, false, pf_bd_num);
+ if (ret) {
+ dev_err(dev, "fail(%d) to clear pf msix int during init\n",
+ ret);
+ goto msi_error;
+ }
+
+ /* Handle Non-fatal HNS RAS errors */
+ if (status & HCLGE_RAS_REG_NFE_MASK) {
+ dev_err(dev, "HNS hw error(RAS) identified during init\n");
+ hclge_handle_all_ras_errors(hdev);
+ }
+
+msi_error:
+ kfree(desc);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
new file mode 100644
index 000000000..d647f3c84
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGE_ERR_H
+#define __HCLGE_ERR_H
+
+#include "hclge_main.h"
+#include "hnae3.h"
+
+#define HCLGE_MPF_RAS_INT_MIN_BD_NUM 10
+#define HCLGE_PF_RAS_INT_MIN_BD_NUM 4
+#define HCLGE_MPF_MSIX_INT_MIN_BD_NUM 10
+#define HCLGE_PF_MSIX_INT_MIN_BD_NUM 4
+
+#define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00
+#define HCLGE_RAS_REG_NFE_MASK 0xFF00
+#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000
+
+#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00
+
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN 0x300
+#define HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK 0x300
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN 0xFFFF
+#define HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK 0xFFFF
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN 0xFFFF0000
+#define HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK 0xFFFF0000
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN 0x0100
+#define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100
+#define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF
+#define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000
+#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000
+#define HCLGE_IGU_ERR_INT_EN 0x0000000F
+#define HCLGE_IGU_ERR_INT_TYPE 0x00000660
+#define HCLGE_IGU_ERR_INT_EN_MASK 0x000F
+#define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF
+#define HCLGE_IGU_TNL_ERR_INT_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN 0xFFFFFFFF
+#define HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK 0xFFFFFFFF
+#define HCLGE_PPP_PF_ERR_INT_EN 0x0003
+#define HCLGE_PPP_PF_ERR_INT_EN_MASK 0x0003
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN 0x003F
+#define HCLGE_PPP_MPF_ECC_ERR_INT3_EN_MASK 0x003F
+#define HCLGE_TM_SCH_ECC_ERR_INT_EN 0x3
+#define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF
+#define HCLGE_NCSI_ERR_INT_EN 0x3
+#define HCLGE_NCSI_ERR_INT_TYPE 0x9
+#define HCLGE_MAC_COMMON_ERR_INT_EN 0x107FF
+#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK 0x107FF
+#define HCLGE_MAC_TNL_INT_EN GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_MAC_TNL_INT_CLR GENMASK(9, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0)
+#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0)
+#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0)
+#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0)
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101
+#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101
+#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0)
+#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF
+#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0)
+#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0)
+
+#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0)
+#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF
+#define HCLGE_IGU_INT_MASK GENMASK(3, 0)
+#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0)
+#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0)
+#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0)
+#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK BIT(29)
+#define HCLGE_PPU_PF_INT_RAS_MASK 0x18
+#define HCLGE_PPU_PF_INT_MSIX_MASK 0x26
+#define HCLGE_PPU_PF_OVER_8BD_ERR_MASK 0x01
+#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0)
+#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0)
+#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0)
+
+#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1
+#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF
+#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1
+#define HCLGE_ROCEE_RERR_INT_MASK BIT(0)
+#define HCLGE_ROCEE_BERR_INT_MASK BIT(1)
+#define HCLGE_ROCEE_AXI_ERR_INT_MASK GENMASK(1, 0)
+#define HCLGE_ROCEE_ECC_INT_MASK BIT(2)
+#define HCLGE_ROCEE_OVF_INT_MASK BIT(3)
+#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000
+#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F
+
+enum hclge_err_int_type {
+ HCLGE_ERR_INT_MSIX = 0,
+ HCLGE_ERR_INT_RAS_CE = 1,
+ HCLGE_ERR_INT_RAS_NFE = 2,
+ HCLGE_ERR_INT_RAS_FE = 3,
+};
+
+struct hclge_hw_blk {
+ u32 msk;
+ const char *name;
+ int (*config_err_int)(struct hclge_dev *hdev, bool en);
+};
+
+struct hclge_hw_error {
+ u32 int_msk;
+ const char *msg;
+ enum hnae3_reset_type reset_level;
+};
+
+int hclge_config_mac_tnl_int(struct hclge_dev *hdev, bool en);
+int hclge_config_nic_hw_error(struct hclge_dev *hdev, bool state);
+int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en);
+void hclge_handle_all_hns_hw_errors(struct hnae3_ae_dev *ae_dev);
+pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev);
+int hclge_handle_hw_msix_error(struct hclge_dev *hdev,
+ unsigned long *reset_requests);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
new file mode 100644
index 000000000..deba485ce
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -0,0 +1,11550 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/if_vlan.h>
+#include <linux/crash_dump.h>
+#include <net/rtnetlink.h>
+#include "hclge_cmd.h"
+#include "hclge_dcb.h"
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hclge_mdio.h"
+#include "hclge_tm.h"
+#include "hclge_err.h"
+#include "hnae3.h"
+
+#define HCLGE_NAME "hclge"
+#define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
+#define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
+
+#define HCLGE_BUF_SIZE_UNIT 256U
+#define HCLGE_BUF_MUL_BY 2
+#define HCLGE_BUF_DIV_BY 2
+#define NEED_RESERVE_TC_NUM 2
+#define BUF_MAX_PERCENT 100
+#define BUF_RESERVE_PERCENT 90
+
+#define HCLGE_RESET_MAX_FAIL_CNT 5
+#define HCLGE_RESET_SYNC_TIME 100
+#define HCLGE_PF_RESET_SYNC_TIME 20
+#define HCLGE_PF_RESET_SYNC_CNT 1500
+
+/* Get DFX BD number offset */
+#define HCLGE_DFX_BIOS_BD_OFFSET 1
+#define HCLGE_DFX_SSU_0_BD_OFFSET 2
+#define HCLGE_DFX_SSU_1_BD_OFFSET 3
+#define HCLGE_DFX_IGU_BD_OFFSET 4
+#define HCLGE_DFX_RPU_0_BD_OFFSET 5
+#define HCLGE_DFX_RPU_1_BD_OFFSET 6
+#define HCLGE_DFX_NCSI_BD_OFFSET 7
+#define HCLGE_DFX_RTC_BD_OFFSET 8
+#define HCLGE_DFX_PPP_BD_OFFSET 9
+#define HCLGE_DFX_RCB_BD_OFFSET 10
+#define HCLGE_DFX_TQP_BD_OFFSET 11
+#define HCLGE_DFX_SSU_2_BD_OFFSET 12
+
+#define HCLGE_LINK_STATUS_MS 10
+
+#define HCLGE_VF_VPORT_START_NUM 1
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
+static int hclge_init_vlan_config(struct hclge_dev *hdev);
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev);
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle);
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev);
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle);
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr);
+static int hclge_set_default_loopback(struct hclge_dev *hdev);
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev);
+static void hclge_restore_hw_table(struct hclge_dev *hdev);
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev);
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt);
+
+static struct hnae3_ae_algo ae_algo;
+
+static struct workqueue_struct *hclge_wq;
+
+static const struct pci_device_id ae_algo_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_200G_RDMA), 0},
+ /* required last entry */
+ {0, }
+};
+
+MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl);
+
+static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG,
+ HCLGE_CMDQ_TX_ADDR_H_REG,
+ HCLGE_CMDQ_TX_DEPTH_REG,
+ HCLGE_CMDQ_TX_TAIL_REG,
+ HCLGE_CMDQ_TX_HEAD_REG,
+ HCLGE_CMDQ_RX_ADDR_L_REG,
+ HCLGE_CMDQ_RX_ADDR_H_REG,
+ HCLGE_CMDQ_RX_DEPTH_REG,
+ HCLGE_CMDQ_RX_TAIL_REG,
+ HCLGE_CMDQ_RX_HEAD_REG,
+ HCLGE_VECTOR0_CMDQ_SRC_REG,
+ HCLGE_CMDQ_INTR_STS_REG,
+ HCLGE_CMDQ_INTR_EN_REG,
+ HCLGE_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE,
+ HCLGE_VECTOR0_OTER_EN_REG,
+ HCLGE_MISC_RESET_STS_REG,
+ HCLGE_MISC_VECTOR_INT_STS,
+ HCLGE_GLOBAL_RESET_REG,
+ HCLGE_FUN_RST_ING,
+ HCLGE_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG,
+ HCLGE_RING_RX_ADDR_H_REG,
+ HCLGE_RING_RX_BD_NUM_REG,
+ HCLGE_RING_RX_BD_LENGTH_REG,
+ HCLGE_RING_RX_MERGE_EN_REG,
+ HCLGE_RING_RX_TAIL_REG,
+ HCLGE_RING_RX_HEAD_REG,
+ HCLGE_RING_RX_FBD_NUM_REG,
+ HCLGE_RING_RX_OFFSET_REG,
+ HCLGE_RING_RX_FBD_OFFSET_REG,
+ HCLGE_RING_RX_STASH_REG,
+ HCLGE_RING_RX_BD_ERR_REG,
+ HCLGE_RING_TX_ADDR_L_REG,
+ HCLGE_RING_TX_ADDR_H_REG,
+ HCLGE_RING_TX_BD_NUM_REG,
+ HCLGE_RING_TX_PRIORITY_REG,
+ HCLGE_RING_TX_TC_REG,
+ HCLGE_RING_TX_MERGE_EN_REG,
+ HCLGE_RING_TX_TAIL_REG,
+ HCLGE_RING_TX_HEAD_REG,
+ HCLGE_RING_TX_FBD_NUM_REG,
+ HCLGE_RING_TX_OFFSET_REG,
+ HCLGE_RING_TX_EBD_NUM_REG,
+ HCLGE_RING_TX_EBD_OFFSET_REG,
+ HCLGE_RING_TX_BD_ERR_REG,
+ HCLGE_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG,
+ HCLGE_TQP_INTR_GL0_REG,
+ HCLGE_TQP_INTR_GL1_REG,
+ HCLGE_TQP_INTR_GL2_REG,
+ HCLGE_TQP_INTR_RL_REG};
+
+static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = {
+ "App Loopback test",
+ "Serdes serial Loopback test",
+ "Serdes parallel Loopback test",
+ "Phy Loopback test"
+};
+
+static const struct hclge_comm_stats_str g_mac_stats_string[] = {
+ {"mac_tx_mac_pause_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_mac_pause_num)},
+ {"mac_rx_mac_pause_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_mac_pause_num)},
+ {"mac_tx_control_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_ctrl_pkt_num)},
+ {"mac_rx_control_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_ctrl_pkt_num)},
+ {"mac_tx_pfc_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pause_pkt_num)},
+ {"mac_tx_pfc_pri0_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri0_pkt_num)},
+ {"mac_tx_pfc_pri1_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri1_pkt_num)},
+ {"mac_tx_pfc_pri2_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri2_pkt_num)},
+ {"mac_tx_pfc_pri3_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri3_pkt_num)},
+ {"mac_tx_pfc_pri4_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri4_pkt_num)},
+ {"mac_tx_pfc_pri5_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri5_pkt_num)},
+ {"mac_tx_pfc_pri6_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri6_pkt_num)},
+ {"mac_tx_pfc_pri7_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_pfc_pri7_pkt_num)},
+ {"mac_rx_pfc_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pause_pkt_num)},
+ {"mac_rx_pfc_pri0_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri0_pkt_num)},
+ {"mac_rx_pfc_pri1_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri1_pkt_num)},
+ {"mac_rx_pfc_pri2_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri2_pkt_num)},
+ {"mac_rx_pfc_pri3_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri3_pkt_num)},
+ {"mac_rx_pfc_pri4_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri4_pkt_num)},
+ {"mac_rx_pfc_pri5_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri5_pkt_num)},
+ {"mac_rx_pfc_pri6_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri6_pkt_num)},
+ {"mac_rx_pfc_pri7_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_pfc_pri7_pkt_num)},
+ {"mac_tx_total_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_pkt_num)},
+ {"mac_tx_total_oct_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_total_oct_num)},
+ {"mac_tx_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_pkt_num)},
+ {"mac_tx_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_pkt_num)},
+ {"mac_tx_good_oct_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_good_oct_num)},
+ {"mac_tx_bad_oct_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_bad_oct_num)},
+ {"mac_tx_uni_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_uni_pkt_num)},
+ {"mac_tx_multi_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_multi_pkt_num)},
+ {"mac_tx_broad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_broad_pkt_num)},
+ {"mac_tx_undersize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undersize_pkt_num)},
+ {"mac_tx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_oversize_pkt_num)},
+ {"mac_tx_64_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_64_oct_pkt_num)},
+ {"mac_tx_65_127_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_65_127_oct_pkt_num)},
+ {"mac_tx_128_255_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_128_255_oct_pkt_num)},
+ {"mac_tx_256_511_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_256_511_oct_pkt_num)},
+ {"mac_tx_512_1023_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_512_1023_oct_pkt_num)},
+ {"mac_tx_1024_1518_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1024_1518_oct_pkt_num)},
+ {"mac_tx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_2047_oct_pkt_num)},
+ {"mac_tx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_2048_4095_oct_pkt_num)},
+ {"mac_tx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_4096_8191_oct_pkt_num)},
+ {"mac_tx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_8192_9216_oct_pkt_num)},
+ {"mac_tx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_9217_12287_oct_pkt_num)},
+ {"mac_tx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_12288_16383_oct_pkt_num)},
+ {"mac_tx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_good_oct_pkt_num)},
+ {"mac_tx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_1519_max_bad_oct_pkt_num)},
+ {"mac_rx_total_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_pkt_num)},
+ {"mac_rx_total_oct_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_total_oct_num)},
+ {"mac_rx_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_pkt_num)},
+ {"mac_rx_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_pkt_num)},
+ {"mac_rx_good_oct_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_good_oct_num)},
+ {"mac_rx_bad_oct_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_bad_oct_num)},
+ {"mac_rx_uni_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_uni_pkt_num)},
+ {"mac_rx_multi_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_multi_pkt_num)},
+ {"mac_rx_broad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_broad_pkt_num)},
+ {"mac_rx_undersize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undersize_pkt_num)},
+ {"mac_rx_oversize_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_oversize_pkt_num)},
+ {"mac_rx_64_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_64_oct_pkt_num)},
+ {"mac_rx_65_127_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_65_127_oct_pkt_num)},
+ {"mac_rx_128_255_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_128_255_oct_pkt_num)},
+ {"mac_rx_256_511_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_256_511_oct_pkt_num)},
+ {"mac_rx_512_1023_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_512_1023_oct_pkt_num)},
+ {"mac_rx_1024_1518_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1024_1518_oct_pkt_num)},
+ {"mac_rx_1519_2047_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_2047_oct_pkt_num)},
+ {"mac_rx_2048_4095_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_2048_4095_oct_pkt_num)},
+ {"mac_rx_4096_8191_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_4096_8191_oct_pkt_num)},
+ {"mac_rx_8192_9216_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_8192_9216_oct_pkt_num)},
+ {"mac_rx_9217_12287_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_9217_12287_oct_pkt_num)},
+ {"mac_rx_12288_16383_oct_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_12288_16383_oct_pkt_num)},
+ {"mac_rx_1519_max_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_good_oct_pkt_num)},
+ {"mac_rx_1519_max_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_1519_max_bad_oct_pkt_num)},
+
+ {"mac_tx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_fragment_pkt_num)},
+ {"mac_tx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_undermin_pkt_num)},
+ {"mac_tx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_jabber_pkt_num)},
+ {"mac_tx_err_all_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_err_all_pkt_num)},
+ {"mac_tx_from_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_good_pkt_num)},
+ {"mac_tx_from_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_tx_from_app_bad_pkt_num)},
+ {"mac_rx_fragment_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fragment_pkt_num)},
+ {"mac_rx_undermin_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_undermin_pkt_num)},
+ {"mac_rx_jabber_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_jabber_pkt_num)},
+ {"mac_rx_fcs_err_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_fcs_err_pkt_num)},
+ {"mac_rx_send_app_good_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_good_pkt_num)},
+ {"mac_rx_send_app_bad_pkt_num",
+ HCLGE_MAC_STATS_FIELD_OFF(mac_rx_send_app_bad_pkt_num)}
+};
+
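+/* MAC manager table: the single entry below matches LLDP frames by
+ * destination MAC (01:80:c2:00:00:0e) and ethertype ETH_P_LLDP.
+ */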
+static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
+ {
+ .flags = HCLGE_MAC_MGR_MASK_VLAN_B,
+ .ethter_type = cpu_to_le16(ETH_P_LLDP),
+ .mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
+ .i_port_bitmap = 0x1,
+ },
+};
+
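+/* default RSS hash key used to initialize the hardware RSS configuration */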
+static const u8 hclge_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
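+/* these two tables are indexed in parallel: entry i of the BD offset list
+ * corresponds to entry i of the DFX register opcode list below.
+ */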
+static const u32 hclge_dfx_bd_offset_list[] = {
+ HCLGE_DFX_BIOS_BD_OFFSET,
+ HCLGE_DFX_SSU_0_BD_OFFSET,
+ HCLGE_DFX_SSU_1_BD_OFFSET,
+ HCLGE_DFX_IGU_BD_OFFSET,
+ HCLGE_DFX_RPU_0_BD_OFFSET,
+ HCLGE_DFX_RPU_1_BD_OFFSET,
+ HCLGE_DFX_NCSI_BD_OFFSET,
+ HCLGE_DFX_RTC_BD_OFFSET,
+ HCLGE_DFX_PPP_BD_OFFSET,
+ HCLGE_DFX_RCB_BD_OFFSET,
+ HCLGE_DFX_TQP_BD_OFFSET,
+ HCLGE_DFX_SSU_2_BD_OFFSET
+};
+
+static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = {
+ HCLGE_OPC_DFX_BIOS_COMMON_REG,
+ HCLGE_OPC_DFX_SSU_REG_0,
+ HCLGE_OPC_DFX_SSU_REG_1,
+ HCLGE_OPC_DFX_IGU_EGU_REG,
+ HCLGE_OPC_DFX_RPU_REG_0,
+ HCLGE_OPC_DFX_RPU_REG_1,
+ HCLGE_OPC_DFX_NCSI_REG,
+ HCLGE_OPC_DFX_RTC_REG,
+ HCLGE_OPC_DFX_PPP_REG,
+ HCLGE_OPC_DFX_RCB_REG,
+ HCLGE_OPC_DFX_TQP_REG,
+ HCLGE_OPC_DFX_SSU_REG_2
+};
+
+static const struct key_info meta_data_key_info[] = {
+ { PACKET_TYPE_ID, 6},
+ { IP_FRAGEMENT, 1},
+ { ROCE_TYPE, 1},
+ { NEXT_KEY, 5},
+ { VLAN_NUMBER, 2},
+ { SRC_VPORT, 12},
+ { DST_VPORT, 12},
+ { TUNNEL_PACKET, 1},
+};
+
+static const struct key_info tuple_key_info[] = {
+ { OUTER_DST_MAC, 48},
+ { OUTER_SRC_MAC, 48},
+ { OUTER_VLAN_TAG_FST, 16},
+ { OUTER_VLAN_TAG_SEC, 16},
+ { OUTER_ETH_TYPE, 16},
+ { OUTER_L2_RSV, 16},
+ { OUTER_IP_TOS, 8},
+ { OUTER_IP_PROTO, 8},
+ { OUTER_SRC_IP, 32},
+ { OUTER_DST_IP, 32},
+ { OUTER_L3_RSV, 16},
+ { OUTER_SRC_PORT, 16},
+ { OUTER_DST_PORT, 16},
+ { OUTER_L4_RSV, 32},
+ { OUTER_TUN_VNI, 24},
+ { OUTER_TUN_FLOW_ID, 8},
+ { INNER_DST_MAC, 48},
+ { INNER_SRC_MAC, 48},
+ { INNER_VLAN_TAG_FST, 16},
+ { INNER_VLAN_TAG_SEC, 16},
+ { INNER_ETH_TYPE, 16},
+ { INNER_L2_RSV, 16},
+ { INNER_IP_TOS, 8},
+ { INNER_IP_PROTO, 8},
+ { INNER_SRC_IP, 32},
+ { INNER_DST_IP, 32},
+ { INNER_L3_RSV, 16},
+ { INNER_SRC_PORT, 16},
+ { INNER_DST_PORT, 16},
+ { INNER_L4_RSV, 32},
+};
+
+static int hclge_mac_update_stats_defective(struct hclge_dev *hdev)
+{
+#define HCLGE_MAC_CMD_NUM 21
+
+ u64 *data = (u64 *)(&hdev->mac_stats);
+ struct hclge_desc desc[HCLGE_MAC_CMD_NUM];
+ __le64 *desc_data;
+ int i, k, n;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_MAC_CMD_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get MAC pkt stats fail, status = %d.\n", ret);
+
+ return ret;
+ }
+
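+ /* accumulate each 64-bit counter into hdev->mac_stats, which is laid
+ * out as consecutive u64 fields and is walked here as a flat array
+ */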
+ for (i = 0; i < HCLGE_MAC_CMD_NUM; i++) {
+ /* for special opcode 0032, only the first desc has the head */
+ if (unlikely(i == 0)) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_RD_FIRST_STATS_NUM;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_RD_OTHER_STATS_NUM;
+ }
+
+ for (k = 0; k < n; k++) {
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_mac_update_stats_complete(struct hclge_dev *hdev, u32 desc_num)
+{
+ u64 *data = (u64 *)(&hdev->mac_stats);
+ struct hclge_desc *desc;
+ __le64 *desc_data;
+ u16 i, k, n;
+ int ret;
+
+ /* This may be called inside atomic sections,
+ * so GFP_ATOMIC is more suitable here
+ */
+ desc = kcalloc(desc_num, sizeof(struct hclge_desc), GFP_ATOMIC);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_STATS_MAC_ALL, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, desc_num);
+ if (ret) {
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < desc_num; i++) {
+ /* for special opcode 0034, only the first desc has the head */
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_RD_FIRST_STATS_NUM;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_RD_OTHER_STATS_NUM;
+ }
+
+ for (k = 0; k < n; k++) {
+ *data += le64_to_cpu(*desc_data);
+ data++;
+ desc_data++;
+ }
+ }
+
+ kfree(desc);
+
+ return 0;
+}
+
+static int hclge_mac_query_reg_num(struct hclge_dev *hdev, u32 *desc_num)
+{
+ struct hclge_desc desc;
+ __le32 *desc_data;
+ u32 reg_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_MAC_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ desc_data = (__le32 *)(&desc.data[0]);
+ reg_num = le32_to_cpu(*desc_data);
+
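+ /* convert the register count into the number of query descriptors:
+ * the first descriptor holds fewer stats registers than the rest,
+ * each additional descriptor holds four, and the division rounds up
+ */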
+ *desc_num = 1 + ((reg_num - 3) >> 2) +
+ (u32)(((reg_num - 3) & 0x3) ? 1 : 0);
+
+ return 0;
+}
+
+static int hclge_mac_update_stats(struct hclge_dev *hdev)
+{
+ u32 desc_num;
+ int ret;
+
+ ret = hclge_mac_query_reg_num(hdev, &desc_num);
+
+ /* The firmware supports the new statistics acquisition method */
+ if (!ret)
+ ret = hclge_mac_update_stats_complete(hdev, desc_num);
+ else if (ret == -EOPNOTSUPP)
+ ret = hclge_mac_update_stats_defective(hdev);
+ else
+ dev_err(&hdev->pdev->dev, "query mac reg num fail!\n");
+
+ return ret;
+}
+
+static int hclge_tqps_update_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_queue *queue;
+ struct hclge_desc desc[1];
+ struct hclge_tqp *tqp;
+ int ret, i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclge_tqp, q);
+ /* command : HCLGE_OPC_QUERY_RX_STATS */
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_RX_STATS,
+ true);
+
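+ /* mask the queue index to the low 9 bits before passing it to the
+ * command
+ */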
+ desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc[0].data[1]);
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclge_tqp, q);
+ /* command : HCLGE_OPC_QUERY_TX_STATS */
+ hclge_cmd_setup_basic_desc(&desc[0],
+ HCLGE_OPC_QUERY_TX_STATS,
+ true);
+
+ desc[0].data[0] = cpu_to_le32((tqp->index & 0x1ff));
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query tqp stat fail, status = %d,queue = %d\n",
+ ret, i);
+ return ret;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc[0].data[1]);
+ }
+
+ return 0;
+}
+
+static u64 *hclge_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_tqp *tqp;
+ u64 *buff = data;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+static int hclge_tqps_get_sset_count(struct hnae3_handle *handle, int stringset)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+ /* each tqp has one TX queue and one RX queue */
+ return kinfo->num_tqps * 2;
+}
+
+static u8 *hclge_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u8 *buff = data;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp = container_of(handle->kinfo.tqp[i],
+ struct hclge_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ tqp->index);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclge_tqp *tqp = container_of(kinfo->tqp[i],
+ struct hclge_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ tqp->index);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+static u64 *hclge_comm_get_stats(const void *comm_stats,
+ const struct hclge_comm_stats_str strs[],
+ int size, u64 *data)
+{
+ u64 *buf = data;
+ u32 i;
+
+ for (i = 0; i < size; i++)
+ buf[i] = HCLGE_STATS_READ(comm_stats, strs[i].offset);
+
+ return buf + size;
+}
+
+static u8 *hclge_comm_get_strings(u32 stringset,
+ const struct hclge_comm_stats_str strs[],
+ int size, u8 *data)
+{
+ char *buff = (char *)data;
+ u32 i;
+
+ if (stringset != ETH_SS_STATS)
+ return buff;
+
+ for (i = 0; i < size; i++) {
+ snprintf(buff, ETH_GSTRING_LEN, "%s", strs[i].desc);
+ buff = buff + ETH_GSTRING_LEN;
+ }
+
+ return (u8 *)buff;
+}
+
+static void hclge_update_stats_for_all(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle;
+ int status;
+
+ handle = &hdev->vport[0].nic;
+ if (handle->client) {
+ status = hclge_tqps_update_stats(handle);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Update TQPS stats fail, status = %d.\n",
+ status);
+ }
+ }
+
+ status = hclge_mac_update_stats(hdev);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Update MAC stats fail, status = %d.\n", status);
+}
+
+static void hclge_update_stats(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (test_and_set_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state))
+ return;
+
+ status = hclge_mac_update_stats(hdev);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Update MAC stats fail, status = %d.\n",
+ status);
+
+ status = hclge_tqps_update_stats(handle);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Update TQPS stats fail, status = %d.\n",
+ status);
+
+ clear_bit(HCLGE_STATE_STATISTICS_UPDATING, &hdev->state);
+}
+
+static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset)
+{
+#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\
+ HNAE3_SUPPORT_PHY_LOOPBACK |\
+ HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\
+ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK)
+
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int count = 0;
+
+ /* Loopback test support rules:
+ * mac: only supported in GE mode
+ * serdes: supported in all MAC modes, including GE/XGE/LGE/CGE
+ * phy: only supported when a PHY device exists on the board
+ */
+ if (stringset == ETH_SS_TEST) {
+ /* clear loopback bit flags at first */
+ handle->flags = (handle->flags & (~HCLGE_LOOPBACK_TEST_FLAGS));
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2 ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_10M ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_100M ||
+ hdev->hw.mac.speed == HCLGE_MAC_SPEED_1G) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_APP_LOOPBACK;
+ }
+
+ count += 2;
+ handle->flags |= HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK;
+ handle->flags |= HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK;
+
+ if (hdev->hw.mac.phydev && hdev->hw.mac.phydev->drv &&
+ hdev->hw.mac.phydev->drv->set_loopback) {
+ count += 1;
+ handle->flags |= HNAE3_SUPPORT_PHY_LOOPBACK;
+ }
+
+ } else if (stringset == ETH_SS_STATS) {
+ count = ARRAY_SIZE(g_mac_stats_string) +
+ hclge_tqps_get_sset_count(handle, stringset);
+ }
+
+ return count;
+}
+
+static void hclge_get_strings(struct hnae3_handle *handle, u32 stringset,
+ u8 *data)
+{
+ u8 *p = (char *)data;
+ int size;
+
+ if (stringset == ETH_SS_STATS) {
+ size = ARRAY_SIZE(g_mac_stats_string);
+ p = hclge_comm_get_strings(stringset, g_mac_stats_string,
+ size, p);
+ p = hclge_tqps_get_strings(handle, p);
+ } else if (stringset == ETH_SS_TEST) {
+ if (handle->flags & HNAE3_SUPPORT_APP_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_APP],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_SERIAL_SERDES],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) {
+ memcpy(p,
+ hns3_nic_test_strs[HNAE3_LOOP_PARALLEL_SERDES],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ if (handle->flags & HNAE3_SUPPORT_PHY_LOOPBACK) {
+ memcpy(p, hns3_nic_test_strs[HNAE3_LOOP_PHY],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void hclge_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u64 *p;
+
+ p = hclge_comm_get_stats(&hdev->mac_stats, g_mac_stats_string,
+ ARRAY_SIZE(g_mac_stats_string), data);
+ p = hclge_tqps_get_stats(handle, p);
+}
+
+static void hclge_get_mac_stat(struct hnae3_handle *handle,
+ struct hns3_mac_stats *mac_stats)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_update_stats(handle, NULL);
+
+ mac_stats->tx_pause_cnt = hdev->mac_stats.mac_tx_mac_pause_num;
+ mac_stats->rx_pause_cnt = hdev->mac_stats.mac_rx_mac_pause_num;
+}
+
+static int hclge_parse_func_status(struct hclge_dev *hdev,
+ struct hclge_func_status_cmd *status)
+{
+#define HCLGE_MAC_ID_MASK 0xF
+
+ if (!(status->pf_state & HCLGE_PF_STATE_DONE))
+ return -EINVAL;
+
+ /* Set the pf to main pf */
+ if (status->pf_state & HCLGE_PF_STATE_MAIN)
+ hdev->flag |= HCLGE_FLAG_MAIN;
+ else
+ hdev->flag &= ~HCLGE_FLAG_MAIN;
+
+ hdev->hw.mac.mac_id = status->mac_id & HCLGE_MAC_ID_MASK;
+ return 0;
+}
+
+static int hclge_query_function_status(struct hclge_dev *hdev)
+{
+#define HCLGE_QUERY_MAX_CNT 5
+
+ struct hclge_func_status_cmd *req;
+ struct hclge_desc desc;
+ int timeout = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FUNC_STATUS, true);
+ req = (struct hclge_func_status_cmd *)desc.data;
+
+ do {
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query function status failed %d.\n", ret);
+ return ret;
+ }
+
+ /* Check pf reset is done */
+ if (req->pf_state)
+ break;
+ usleep_range(1000, 2000);
+ } while (timeout++ < HCLGE_QUERY_MAX_CNT);
+
+ return hclge_parse_func_status(hdev, req);
+}
+
+static int hclge_query_pf_resource(struct hclge_dev *hdev)
+{
+ struct hclge_pf_res_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_PF_RSRC, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query pf resource failed %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclge_pf_res_cmd *)desc.data;
+ hdev->num_tqps = le16_to_cpu(req->tqp_num);
+ hdev->pkt_buf_size = le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
+
+ if (req->tx_buf_size)
+ hdev->tx_buf_size =
+ le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+ hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+ if (req->dv_buf_size)
+ hdev->dv_buf_size =
+ le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+ else
+ hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+ hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGE_MSIX_OFT_ROCEE_M, HCLGE_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msi =
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+ /* the NIC's MSI-X number is always equal to the RoCE's. */
+ hdev->num_nic_msi = hdev->num_roce_msi;
+
+ /* PF should have NIC vectors and Roce vectors,
+ * NIC vectors are queued before Roce vectors.
+ */
+ hdev->num_msi = hdev->num_roce_msi +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(le16_to_cpu(req->pf_intr_vector_number),
+ HCLGE_PF_VEC_NUM_M, HCLGE_PF_VEC_NUM_S);
+
+ hdev->num_nic_msi = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msi < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for pf(min:2).\n",
+ hdev->num_nic_msi);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_parse_speed(int speed_cmd, int *speed)
+{
+ switch (speed_cmd) {
+ case 6:
+ *speed = HCLGE_MAC_SPEED_10M;
+ break;
+ case 7:
+ *speed = HCLGE_MAC_SPEED_100M;
+ break;
+ case 0:
+ *speed = HCLGE_MAC_SPEED_1G;
+ break;
+ case 1:
+ *speed = HCLGE_MAC_SPEED_10G;
+ break;
+ case 2:
+ *speed = HCLGE_MAC_SPEED_25G;
+ break;
+ case 3:
+ *speed = HCLGE_MAC_SPEED_40G;
+ break;
+ case 4:
+ *speed = HCLGE_MAC_SPEED_50G;
+ break;
+ case 5:
+ *speed = HCLGE_MAC_SPEED_100G;
+ break;
+ case 8:
+ *speed = HCLGE_MAC_SPEED_200G;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 speed_ability = hdev->hw.mac.speed_ability;
+ u32 speed_bit = 0;
+
+ switch (speed) {
+ case HCLGE_MAC_SPEED_10M:
+ speed_bit = HCLGE_SUPPORT_10M_BIT;
+ break;
+ case HCLGE_MAC_SPEED_100M:
+ speed_bit = HCLGE_SUPPORT_100M_BIT;
+ break;
+ case HCLGE_MAC_SPEED_1G:
+ speed_bit = HCLGE_SUPPORT_1G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ speed_bit = HCLGE_SUPPORT_10G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ speed_bit = HCLGE_SUPPORT_25G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ speed_bit = HCLGE_SUPPORT_40G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ speed_bit = HCLGE_SUPPORT_50G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ speed_bit = HCLGE_SUPPORT_100G_BIT;
+ break;
+ case HCLGE_MAC_SPEED_200G:
+ speed_bit = HCLGE_SUPPORT_200G_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (speed_bit & speed_ability)
+ return 0;
+
+ return -EINVAL;
+}
+
+static void hclge_convert_setting_sr(struct hclge_mac *mac, u16 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT,
+ mac->supported);
+}
+
+static void hclge_convert_setting_lr(struct hclge_mac *mac, u16 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(
+ ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT,
+ mac->supported);
+}
+
+static void hclge_convert_setting_cr(struct hclge_mac *mac, u16 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT,
+ mac->supported);
+}
+
+static void hclge_convert_setting_kr(struct hclge_mac *mac, u16 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
+ mac->supported);
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT,
+ mac->supported);
+}
+
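+/* Refresh the supported FEC link modes and mac->fec_ability from the current
+ * MAC speed; speeds without a known FEC mapping end up with an empty
+ * fec_ability.
+ */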
+static void hclge_convert_setting_fec(struct hclge_mac *mac)
+{
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT, mac->supported);
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+
+ switch (mac->speed) {
+ case HCLGE_MAC_SPEED_10G:
+ case HCLGE_MAC_SPEED_40G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT,
+ mac->supported);
+ mac->fec_ability =
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_AUTO);
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ case HCLGE_MAC_SPEED_50G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT,
+ mac->supported);
+ mac->fec_ability =
+ BIT(HNAE3_FEC_BASER) | BIT(HNAE3_FEC_RS) |
+ BIT(HNAE3_FEC_AUTO);
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ case HCLGE_MAC_SPEED_200G:
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT, mac->supported);
+ mac->fec_ability = BIT(HNAE3_FEC_RS) | BIT(HNAE3_FEC_AUTO);
+ break;
+ default:
+ mac->fec_ability = 0;
+ break;
+ }
+}
+
+static void hclge_parse_fiber_link_mode(struct hclge_dev *hdev,
+ u16 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
+ mac->supported);
+
+ hclge_convert_setting_sr(mac, speed_ability);
+ hclge_convert_setting_lr(mac, speed_ability);
+ hclge_convert_setting_cr(mac, speed_ability);
+ if (hnae3_dev_fec_supported(hdev))
+ hclge_convert_setting_fec(mac);
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_backplane_link_mode(struct hclge_dev *hdev,
+ u16 speed_ability)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ hclge_convert_setting_kr(mac, speed_ability);
+ if (hnae3_dev_fec_supported(hdev))
+ hclge_convert_setting_fec(mac);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, mac->supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT, mac->supported);
+}
+
+static void hclge_parse_copper_link_mode(struct hclge_dev *hdev,
+ u16 speed_ability)
+{
+ unsigned long *supported = hdev->hw.mac.supported;
+
+	/* default to support all speeds for GE port */
+ if (!speed_ability)
+ speed_ability = HCLGE_SUPPORT_GE;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
+ supported);
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
+ supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT,
+ supported);
+ }
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, supported);
+ }
+
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, supported);
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, supported);
+}
+
+static void hclge_parse_link_mode(struct hclge_dev *hdev, u16 speed_ability)
+{
+ u8 media_type = hdev->hw.mac.media_type;
+
+ if (media_type == HNAE3_MEDIA_TYPE_FIBER)
+ hclge_parse_fiber_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_COPPER)
+ hclge_parse_copper_link_mode(hdev, speed_ability);
+ else if (media_type == HNAE3_MEDIA_TYPE_BACKPLANE)
+ hclge_parse_backplane_link_mode(hdev, speed_ability);
+}
+
+static u32 hclge_get_max_speed(u16 speed_ability)
+{
+ if (speed_ability & HCLGE_SUPPORT_200G_BIT)
+ return HCLGE_MAC_SPEED_200G;
+
+ if (speed_ability & HCLGE_SUPPORT_100G_BIT)
+ return HCLGE_MAC_SPEED_100G;
+
+ if (speed_ability & HCLGE_SUPPORT_50G_BIT)
+ return HCLGE_MAC_SPEED_50G;
+
+ if (speed_ability & HCLGE_SUPPORT_40G_BIT)
+ return HCLGE_MAC_SPEED_40G;
+
+ if (speed_ability & HCLGE_SUPPORT_25G_BIT)
+ return HCLGE_MAC_SPEED_25G;
+
+ if (speed_ability & HCLGE_SUPPORT_10G_BIT)
+ return HCLGE_MAC_SPEED_10G;
+
+ if (speed_ability & HCLGE_SUPPORT_1G_BIT)
+ return HCLGE_MAC_SPEED_1G;
+
+ if (speed_ability & HCLGE_SUPPORT_100M_BIT)
+ return HCLGE_MAC_SPEED_100M;
+
+ if (speed_ability & HCLGE_SUPPORT_10M_BIT)
+ return HCLGE_MAC_SPEED_10M;
+
+ return HCLGE_MAC_SPEED_1G;
+}
+
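+/* Decode the HCLGE_OPC_GET_CFG_PARAM descriptors into struct hclge_cfg:
+ * vport/TC/queue sizing, PHY address, media type, MAC address, default speed,
+ * RSS size, speed ability and UMV table space.
+ */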
+static void hclge_parse_cfg(struct hclge_cfg *cfg, struct hclge_desc *desc)
+{
+#define SPEED_ABILITY_EXT_SHIFT 8
+
+ struct hclge_cfg_param_cmd *req;
+ u64 mac_addr_tmp_high;
+ u16 speed_ability_ext;
+ u64 mac_addr_tmp;
+ unsigned int i;
+
+ req = (struct hclge_cfg_param_cmd *)desc[0].data;
+
+ /* get the configuration */
+ cfg->vmdq_vport_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_VMDQ_M,
+ HCLGE_CFG_VMDQ_S);
+ cfg->tc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TC_NUM_M, HCLGE_CFG_TC_NUM_S);
+ cfg->tqp_desc_num = hnae3_get_field(__le32_to_cpu(req->param[0]),
+ HCLGE_CFG_TQP_DESC_N_M,
+ HCLGE_CFG_TQP_DESC_N_S);
+
+ cfg->phy_addr = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_PHY_ADDR_M,
+ HCLGE_CFG_PHY_ADDR_S);
+ cfg->media_type = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_MEDIA_TP_M,
+ HCLGE_CFG_MEDIA_TP_S);
+ cfg->rx_buf_len = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_RX_BUF_LEN_M,
+ HCLGE_CFG_RX_BUF_LEN_S);
+ /* get mac_address */
+ mac_addr_tmp = __le32_to_cpu(req->param[2]);
+ mac_addr_tmp_high = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_MAC_ADDR_H_M,
+ HCLGE_CFG_MAC_ADDR_H_S);
+
+ mac_addr_tmp |= (mac_addr_tmp_high << 31) << 1;
+
+ cfg->default_speed = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_DEFAULT_SPEED_M,
+ HCLGE_CFG_DEFAULT_SPEED_S);
+ cfg->rss_size_max = hnae3_get_field(__le32_to_cpu(req->param[3]),
+ HCLGE_CFG_RSS_SIZE_M,
+ HCLGE_CFG_RSS_SIZE_S);
+
+ for (i = 0; i < ETH_ALEN; i++)
+ cfg->mac_addr[i] = (mac_addr_tmp >> (8 * i)) & 0xff;
+
+ req = (struct hclge_cfg_param_cmd *)desc[1].data;
+ cfg->numa_node_map = __le32_to_cpu(req->param[0]);
+
+ cfg->speed_ability = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_M,
+ HCLGE_CFG_SPEED_ABILITY_S);
+ speed_ability_ext = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_SPEED_ABILITY_EXT_M,
+ HCLGE_CFG_SPEED_ABILITY_EXT_S);
+ cfg->speed_ability |= speed_ability_ext << SPEED_ABILITY_EXT_SHIFT;
+
+ cfg->umv_space = hnae3_get_field(__le32_to_cpu(req->param[1]),
+ HCLGE_CFG_UMV_TBL_SPACE_M,
+ HCLGE_CFG_UMV_TBL_SPACE_S);
+ if (!cfg->umv_space)
+ cfg->umv_space = HCLGE_DEFAULT_UMV_SPACE_PER_PF;
+}
+
+/* hclge_get_cfg: query the static parameters from flash
+ * @hdev: pointer to struct hclge_dev
+ * @hcfg: the config structure to be filled in
+ */
+static int hclge_get_cfg(struct hclge_dev *hdev, struct hclge_cfg *hcfg)
+{
+ struct hclge_desc desc[HCLGE_PF_CFG_DESC_NUM];
+ struct hclge_cfg_param_cmd *req;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < HCLGE_PF_CFG_DESC_NUM; i++) {
+ u32 offset = 0;
+
+ req = (struct hclge_cfg_param_cmd *)desc[i].data;
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_CFG_PARAM,
+ true);
+ hnae3_set_field(offset, HCLGE_CFG_OFFSET_M,
+ HCLGE_CFG_OFFSET_S, i * HCLGE_CFG_RD_LEN_BYTES);
+		/* Length should be in units of 4 bytes when sent to hardware */
+ hnae3_set_field(offset, HCLGE_CFG_RD_LEN_M, HCLGE_CFG_RD_LEN_S,
+ HCLGE_CFG_RD_LEN_BYTES / HCLGE_CFG_RD_LEN_UNIT);
+ req->offset = cpu_to_le32(offset);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_PF_CFG_DESC_NUM);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get config failed %d.\n", ret);
+ return ret;
+ }
+
+ hclge_parse_cfg(hcfg, desc);
+
+ return 0;
+}
+
+static void hclge_set_default_dev_specs(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGE_RSS_KEY_SIZE;
+ ae_dev->dev_specs.max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
+static void hclge_parse_dev_specs(struct hclge_dev *hdev,
+ struct hclge_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclge_dev_specs_0_cmd *req0;
+
+ req0 = (struct hclge_dev_specs_0_cmd *)desc[0].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+ ae_dev->dev_specs.max_tm_rate = le32_to_cpu(req0->max_tm_rate);
+}
+
+static void hclge_check_dev_specs(struct hclge_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGE_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGE_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGE_RSS_KEY_SIZE;
+ if (!dev_specs->max_tm_rate)
+ dev_specs->max_tm_rate = HCLGE_ETHER_MAX_RATE;
+}
+
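+/* Query the device specifications (BD limit, RSS table and key sizes, max TM
+ * rate) from the firmware on V3 and newer devices; older devices and any
+ * field reported as zero fall back to the driver defaults.
+ */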
+static int hclge_query_dev_specs(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc[HCLGE_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclge_set_default_dev_specs(hdev);
+ return 0;
+ }
+
+ for (i = 0; i < HCLGE_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_QUERY_DEV_SPECS, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclge_parse_dev_specs(hdev, desc);
+ hclge_check_dev_specs(hdev);
+
+ return 0;
+}
+
+static int hclge_get_cap(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_query_function_status(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query function status error %d.\n", ret);
+ return ret;
+ }
+
+ /* get pf resource */
+ return hclge_query_pf_resource(hdev);
+}
+
+static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
+{
+#define HCLGE_MIN_TX_DESC 64
+#define HCLGE_MIN_RX_DESC 64
+
+ if (!is_kdump_kernel())
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "Running kdump kernel. Using minimal resources\n");
+
+	/* the minimal number of queue pairs equals the number of vports */
+ hdev->num_tqps = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ hdev->num_tx_desc = HCLGE_MIN_TX_DESC;
+ hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
+}
+
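+/* Pull the static configuration into hdev (queue, TC and buffer sizing, MAC
+ * address, link modes and max speed), shrink the resources when running in a
+ * kdump kernel, and bind the service task affinity to the device's NUMA node.
+ */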
+static int hclge_configure(struct hclge_dev *hdev)
+{
+ const struct cpumask *cpumask = cpu_online_mask;
+ struct hclge_cfg cfg;
+ unsigned int i;
+ int node, ret;
+
+ ret = hclge_get_cfg(hdev, &cfg);
+ if (ret)
+ return ret;
+
+ hdev->num_vmdq_vport = cfg.vmdq_vport_num;
+ hdev->base_tqp_pid = 0;
+ hdev->rss_size_max = cfg.rss_size_max;
+ hdev->rx_buf_len = cfg.rx_buf_len;
+ ether_addr_copy(hdev->hw.mac.mac_addr, cfg.mac_addr);
+ hdev->hw.mac.media_type = cfg.media_type;
+ hdev->hw.mac.phy_addr = cfg.phy_addr;
+ hdev->num_tx_desc = cfg.tqp_desc_num;
+ hdev->num_rx_desc = cfg.tqp_desc_num;
+ hdev->tm_info.num_pg = 1;
+ hdev->tc_max = cfg.tc_num;
+ hdev->tm_info.hw_pfc_map = 0;
+ hdev->wanted_umv_size = cfg.umv_space;
+
+ if (hnae3_dev_fd_supported(hdev)) {
+ hdev->fd_en = true;
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ }
+
+ ret = hclge_parse_speed(cfg.default_speed, &hdev->hw.mac.speed);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to parse speed %u, ret = %d\n",
+ cfg.default_speed, ret);
+ return ret;
+ }
+
+ hclge_parse_link_mode(hdev, cfg.speed_ability);
+
+ hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
+
+ if ((hdev->tc_max > HNAE3_MAX_TC) ||
+ (hdev->tc_max < 1)) {
+ dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+ hdev->tc_max);
+ hdev->tc_max = 1;
+ }
+
+ /* Dev does not support DCB */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ hdev->tc_max = 1;
+ hdev->pfc_max = 0;
+ } else {
+ hdev->pfc_max = hdev->tc_max;
+ }
+
+ hdev->tm_info.num_tc = 1;
+
+	/* Non-contiguous TCs are currently not supported */
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+ hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+
+ hclge_init_kdump_kernel_config(hdev);
+
+ /* Set the affinity based on numa node */
+ node = dev_to_node(&hdev->pdev->dev);
+ if (node != NUMA_NO_NODE)
+ cpumask = cpumask_of_node(node);
+
+ cpumask_copy(&hdev->affinity_mask, cpumask);
+
+ return ret;
+}
+
+static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min,
+ u16 tso_mss_max)
+{
+ struct hclge_cfg_tso_status_cmd *req;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TSO_GENERIC_CONFIG, false);
+
+ req = (struct hclge_cfg_tso_status_cmd *)desc.data;
+ req->tso_mss_min = cpu_to_le16(tso_mss_min);
+ req->tso_mss_max = cpu_to_le16(tso_mss_max);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_config_gro(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_cfg_gro_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
+ req = (struct hclge_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = en ? 1 : 0;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "GRO hardware config cmd failed, ret = %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_alloc_tqps(struct hclge_dev *hdev)
+{
+ struct hclge_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclge_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algo;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
+ tqp->q.io_base = hdev->hw.io_base + HCLGE_TQP_REG_OFFSET +
+ i * HCLGE_TQP_REG_SIZE;
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclge_map_tqps_to_func(struct hclge_dev *hdev, u16 func_id,
+ u16 tqp_pid, u16 tqp_vid, bool is_pf)
+{
+ struct hclge_tqp_map_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SET_TQP_MAP, false);
+
+ req = (struct hclge_tqp_map_cmd *)desc.data;
+ req->tqp_id = cpu_to_le16(tqp_pid);
+ req->tqp_vf = func_id;
+ req->tqp_flag = 1U << HCLGE_TQP_MAP_EN_B;
+ if (!is_pf)
+ req->tqp_flag |= 1U << HCLGE_TQP_MAP_TYPE_B;
+ req->tqp_vid = cpu_to_le16(tqp_vid);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "TQP map failed %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_assign_tqp(struct hclge_vport *vport, u16 num_tqps)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i, alloced;
+
+ for (i = 0, alloced = 0; i < hdev->num_tqps &&
+ alloced < num_tqps; i++) {
+ if (!hdev->htqp[i].alloced) {
+ hdev->htqp[i].q.handle = &vport->nic;
+ hdev->htqp[i].q.tqp_index = alloced;
+ hdev->htqp[i].q.tx_desc_num = kinfo->num_tx_desc;
+ hdev->htqp[i].q.rx_desc_num = kinfo->num_rx_desc;
+ kinfo->tqp[alloced] = &hdev->htqp[i].q;
+ hdev->htqp[i].alloced = true;
+ alloced++;
+ }
+ }
+ vport->alloc_tqps = alloced;
+ kinfo->rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / hdev->tm_info.num_tc);
+
+	/* ensure a one-to-one mapping between irq and queue by default */
+ kinfo->rss_size = min_t(u16, kinfo->rss_size,
+ (hdev->num_nic_msi - 1) / hdev->tm_info.num_tc);
+
+ return 0;
+}
+
+static int hclge_knic_setup(struct hclge_vport *vport, u16 num_tqps,
+ u16 num_tx_desc, u16 num_rx_desc)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ kinfo->num_tx_desc = num_tx_desc;
+ kinfo->num_rx_desc = num_rx_desc;
+
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ ret = hclge_assign_tqp(vport, num_tqps);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to assign TQPs %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_map_tqp_to_vport(struct hclge_dev *hdev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 i;
+
+ kinfo = &nic->kinfo;
+ for (i = 0; i < vport->alloc_tqps; i++) {
+ struct hclge_tqp *q =
+ container_of(kinfo->tqp[i], struct hclge_tqp, q);
+ bool is_pf;
+ int ret;
+
+ is_pf = !(vport->vport_id);
+ ret = hclge_map_tqps_to_func(hdev, vport->vport_id, q->index,
+ i, is_pf);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_map_tqp(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u16 i, num_vport;
+
+ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+ for (i = 0; i < num_vport; i++) {
+ int ret;
+
+ ret = hclge_map_tqp_to_vport(hdev, vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ nic->pdev = hdev->pdev;
+ nic->ae_algo = &ae_algo;
+ nic->numa_node_mask = hdev->numa_node_mask;
+
+ ret = hclge_knic_setup(vport, num_tqps,
+ hdev->num_tx_desc, hdev->num_rx_desc);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "knic setup failed %d\n", ret);
+
+ return ret;
+}
+
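+/* Allocate one vport for the PF's main NIC plus the VMDq vports and requested
+ * VFs; TQPs are split evenly between them, with any remainder going to the
+ * main vport.
+ */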
+static int hclge_alloc_vport(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_vport *vport;
+ u32 tqp_main_vport;
+ u32 tqp_per_vport;
+ int num_vport, i;
+ int ret;
+
+ /* We need to alloc a vport for main NIC of PF */
+ num_vport = hdev->num_vmdq_vport + hdev->num_req_vfs + 1;
+
+ if (hdev->num_tqps < num_vport) {
+ dev_err(&hdev->pdev->dev, "tqps(%u) is less than vports(%d)",
+ hdev->num_tqps, num_vport);
+ return -EINVAL;
+ }
+
+ /* Alloc the same number of TQPs for every vport */
+ tqp_per_vport = hdev->num_tqps / num_vport;
+ tqp_main_vport = tqp_per_vport + hdev->num_tqps % num_vport;
+
+ vport = devm_kcalloc(&pdev->dev, num_vport, sizeof(struct hclge_vport),
+ GFP_KERNEL);
+ if (!vport)
+ return -ENOMEM;
+
+ hdev->vport = vport;
+ hdev->num_alloc_vport = num_vport;
+
+ if (IS_ENABLED(CONFIG_PCI_IOV))
+ hdev->num_alloc_vfs = hdev->num_req_vfs;
+
+ for (i = 0; i < num_vport; i++) {
+ vport->back = hdev;
+ vport->vport_id = i;
+ vport->vf_info.link_state = IFLA_VF_LINK_STATE_AUTO;
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ vport->port_base_vlan_cfg.state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ vport->rxvlan_cfg.rx_vlan_offload_en = true;
+ INIT_LIST_HEAD(&vport->vlan_list);
+ INIT_LIST_HEAD(&vport->uc_mac_list);
+ INIT_LIST_HEAD(&vport->mc_mac_list);
+ spin_lock_init(&vport->mac_list_lock);
+
+ if (i == 0)
+ ret = hclge_vport_setup(vport, tqp_main_vport);
+ else
+ ret = hclge_vport_setup(vport, tqp_per_vport);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "vport setup failed for vport %d, %d\n",
+ i, ret);
+ return ret;
+ }
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_cmd_alloc_tx_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+/* TX buffer size is in units of 128 bytes */
+#define HCLGE_BUF_SIZE_UNIT_SHIFT 7
+#define HCLGE_BUF_SIZE_UPDATE_EN_MSK BIT(15)
+ struct hclge_tx_buff_alloc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+ u8 i;
+
+ req = (struct hclge_tx_buff_alloc_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, 0);
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ u32 buf_size = buf_alloc->priv_buf[i].tx_buf_size;
+
+ req->tx_pkt_buff[i] =
+ cpu_to_le16((buf_size >> HCLGE_BUF_SIZE_UNIT_SHIFT) |
+ HCLGE_BUF_SIZE_UPDATE_EN_MSK);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc cmd failed %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_tx_buffer_alloc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ int ret = hclge_cmd_alloc_tx_buff(hdev, buf_alloc);
+
+ if (ret)
+ dev_err(&hdev->pdev->dev, "tx buffer alloc failed %d\n", ret);
+
+ return ret;
+}
+
+static u32 hclge_get_tc_num(struct hclge_dev *hdev)
+{
+ unsigned int i;
+ u32 cnt = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ cnt++;
+ return cnt;
+}
+
+/* Get the number of PFC-enabled TCs, which have private buffer */
+static int hclge_get_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_priv_buf *priv;
+ unsigned int i;
+ int cnt = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+ if ((hdev->tm_info.hw_pfc_map & BIT(i)) &&
+ priv->enable)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+/* Get the number of PFC-disabled TCs, which have private buffer */
+static int hclge_get_no_pfc_priv_num(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_priv_buf *priv;
+ unsigned int i;
+ int cnt = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+ if (hdev->hw_tc_map & BIT(i) &&
+ !(hdev->tm_info.hw_pfc_map & BIT(i)) &&
+ priv->enable)
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static u32 hclge_get_rx_priv_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_priv_buf *priv;
+ u32 rx_priv = 0;
+ int i;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ priv = &buf_alloc->priv_buf[i];
+ if (priv->enable)
+ rx_priv += priv->buf_size;
+ }
+ return rx_priv;
+}
+
+static u32 hclge_get_tx_buff_alloced(struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 i, total_tx_size = 0;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
+ total_tx_size += buf_alloc->priv_buf[i].tx_buf_size;
+
+ return total_tx_size;
+}
+
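+/* Check whether rx_all can hold the enabled TCs' private buffers plus the
+ * required shared buffer; if it can, record the shared buffer size and its
+ * self and per-TC high/low thresholds in buf_alloc.
+ */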
+static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc,
+ u32 rx_all)
+{
+ u32 shared_buf_min, shared_buf_tc, shared_std, hi_thrd, lo_thrd;
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 shared_buf, aligned_mps;
+ u32 rx_priv;
+ int i;
+
+ aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
+
+ if (hnae3_dev_dcb_supported(hdev))
+ shared_buf_min = HCLGE_BUF_MUL_BY * aligned_mps +
+ hdev->dv_buf_size;
+ else
+ shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+ + hdev->dv_buf_size;
+
+ shared_buf_tc = tc_num * aligned_mps + aligned_mps;
+ shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+ HCLGE_BUF_SIZE_UNIT);
+
+ rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
+ if (rx_all < rx_priv + shared_std)
+ return false;
+
+ shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
+ buf_alloc->s_buf.buf_size = shared_buf;
+ if (hnae3_dev_dcb_supported(hdev)) {
+ buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
+ buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
+ - roundup(aligned_mps / HCLGE_BUF_DIV_BY,
+ HCLGE_BUF_SIZE_UNIT);
+ } else {
+ buf_alloc->s_buf.self.high = aligned_mps +
+ HCLGE_NON_DCB_ADDITIONAL_BUF;
+ buf_alloc->s_buf.self.low = aligned_mps;
+ }
+
+ if (hnae3_dev_dcb_supported(hdev)) {
+ hi_thrd = shared_buf - hdev->dv_buf_size;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ hi_thrd = hi_thrd * BUF_RESERVE_PERCENT
+ / BUF_MAX_PERCENT;
+
+ if (tc_num)
+ hi_thrd = hi_thrd / tc_num;
+
+ hi_thrd = max_t(u32, hi_thrd, HCLGE_BUF_MUL_BY * aligned_mps);
+ hi_thrd = rounddown(hi_thrd, HCLGE_BUF_SIZE_UNIT);
+ lo_thrd = hi_thrd - aligned_mps / HCLGE_BUF_DIV_BY;
+ } else {
+ hi_thrd = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF;
+ lo_thrd = aligned_mps;
+ }
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ buf_alloc->s_buf.tc_thrd[i].low = lo_thrd;
+ buf_alloc->s_buf.tc_thrd[i].high = hi_thrd;
+ }
+
+ return true;
+}
+
+static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 i, total_size;
+
+ total_size = hdev->pkt_buf_size;
+
+ /* alloc tx buffer for all enabled tc */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ if (hdev->hw_tc_map & BIT(i)) {
+ if (total_size < hdev->tx_buf_size)
+ return -ENOMEM;
+
+ priv->tx_buf_size = hdev->tx_buf_size;
+ } else {
+ priv->tx_buf_size = 0;
+ }
+
+ total_size -= priv->tx_buf_size;
+ }
+
+ return 0;
+}
+
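+/* Give every enabled TC a private RX buffer whose waterlines depend on
+ * whether PFC is enabled for the TC and on @max (larger or smaller waterline),
+ * then check that the remaining space still fits the shared buffer.
+ */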
+static bool hclge_rx_buf_calc_all(struct hclge_dev *hdev, bool max,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 aligned_mps = round_up(hdev->mps, HCLGE_BUF_SIZE_UNIT);
+ unsigned int i;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+
+ if (hdev->tm_info.hw_pfc_map & BIT(i)) {
+ priv->wl.low = max ? aligned_mps : HCLGE_BUF_SIZE_UNIT;
+ priv->wl.high = roundup(priv->wl.low + aligned_mps,
+ HCLGE_BUF_SIZE_UNIT);
+ } else {
+ priv->wl.low = 0;
+ priv->wl.high = max ? (aligned_mps * HCLGE_BUF_MUL_BY) :
+ aligned_mps;
+ }
+
+ priv->buf_size = priv->wl.high + hdev->dv_buf_size;
+ }
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+static bool hclge_drop_nopfc_buf_till_fit(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ int no_pfc_priv_num = hclge_get_no_pfc_priv_num(hdev, buf_alloc);
+ int i;
+
+	/* let the last TC be cleared first */
+ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
+
+ if (hdev->hw_tc_map & mask &&
+ !(hdev->tm_info.hw_pfc_map & mask)) {
+			/* Clear the private buffer of the TC without PFC */
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+ priv->enable = 0;
+ no_pfc_priv_num--;
+ }
+
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
+ no_pfc_priv_num == 0)
+ break;
+ }
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
+static bool hclge_drop_pfc_buf_till_fit(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ u32 rx_all = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ int pfc_priv_num = hclge_get_pfc_priv_num(hdev, buf_alloc);
+ int i;
+
+	/* let the last TC be cleared first */
+ for (i = HCLGE_MAX_TC_NUM - 1; i >= 0; i--) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+ unsigned int mask = BIT((unsigned int)i);
+
+ if (hdev->hw_tc_map & mask &&
+ hdev->tm_info.hw_pfc_map & mask) {
+			/* Reduce the number of PFC TCs with private buffer */
+ priv->wl.low = 0;
+ priv->enable = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+ pfc_priv_num--;
+ }
+
+ if (hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all) ||
+ pfc_priv_num == 0)
+ break;
+ }
+
+ return hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all);
+}
+
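+/* Try a layout that uses only private RX buffers and no shared buffer: split
+ * the remaining space evenly between the enabled TCs and succeed only if each
+ * TC's share meets the minimum private buffer size (dv_buf_size plus
+ * compensation).
+ */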
+static bool hclge_only_alloc_priv_buff(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+#define COMPENSATE_BUFFER 0x3C00
+#define COMPENSATE_HALF_MPS_NUM 5
+#define PRIV_WL_GAP 0x1800
+
+ u32 rx_priv = hdev->pkt_buf_size - hclge_get_tx_buff_alloced(buf_alloc);
+ u32 tc_num = hclge_get_tc_num(hdev);
+ u32 half_mps = hdev->mps >> 1;
+ u32 min_rx_priv;
+ unsigned int i;
+
+ if (tc_num)
+ rx_priv = rx_priv / tc_num;
+
+ if (tc_num <= NEED_RESERVE_TC_NUM)
+ rx_priv = rx_priv * BUF_RESERVE_PERCENT / BUF_MAX_PERCENT;
+
+ min_rx_priv = hdev->dv_buf_size + COMPENSATE_BUFFER +
+ COMPENSATE_HALF_MPS_NUM * half_mps;
+ min_rx_priv = round_up(min_rx_priv, HCLGE_BUF_SIZE_UNIT);
+ rx_priv = round_down(rx_priv, HCLGE_BUF_SIZE_UNIT);
+
+ if (rx_priv < min_rx_priv)
+ return false;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ priv->enable = 0;
+ priv->wl.low = 0;
+ priv->wl.high = 0;
+ priv->buf_size = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ priv->enable = 1;
+ priv->buf_size = rx_priv;
+ priv->wl.high = rx_priv - hdev->dv_buf_size;
+ priv->wl.low = priv->wl.high - PRIV_WL_GAP;
+ }
+
+ buf_alloc->s_buf.buf_size = 0;
+
+ return true;
+}
+
+/* hclge_rx_buffer_calc: calculate the rx private buffer size for all TCs
+ * @hdev: pointer to struct hclge_dev
+ * @buf_alloc: pointer to buffer calculation data
+ * @return: 0: calculation successful, negative: fail
+ */
+static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ /* When DCB is not supported, rx private buffer is not allocated. */
+ if (!hnae3_dev_dcb_supported(hdev)) {
+ u32 rx_all = hdev->pkt_buf_size;
+
+ rx_all -= hclge_get_tx_buff_alloced(buf_alloc);
+ if (!hclge_is_rx_buf_ok(hdev, buf_alloc, rx_all))
+ return -ENOMEM;
+
+ return 0;
+ }
+
+ if (hclge_only_alloc_priv_buff(hdev, buf_alloc))
+ return 0;
+
+ if (hclge_rx_buf_calc_all(hdev, true, buf_alloc))
+ return 0;
+
+ /* try to decrease the buffer size */
+ if (hclge_rx_buf_calc_all(hdev, false, buf_alloc))
+ return 0;
+
+ if (hclge_drop_nopfc_buf_till_fit(hdev, buf_alloc))
+ return 0;
+
+ if (hclge_drop_pfc_buf_till_fit(hdev, buf_alloc))
+ return 0;
+
+ return -ENOMEM;
+}
+
+static int hclge_rx_priv_buf_alloc(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_rx_priv_buff_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+ int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, false);
+ req = (struct hclge_rx_priv_buff_cmd *)desc.data;
+
+ /* Alloc private buffer TCs */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
+
+ req->buf_num[i] =
+ cpu_to_le16(priv->buf_size >> HCLGE_BUF_UNIT_S);
+ req->buf_num[i] |=
+ cpu_to_le16(1 << HCLGE_TC0_PRI_BUF_EN_B);
+ }
+
+ req->shared_buf =
+ cpu_to_le16((buf_alloc->s_buf.buf_size >> HCLGE_BUF_UNIT_S) |
+ (1 << HCLGE_TC0_PRI_BUF_EN_B));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "rx private buffer alloc cmd failed %d\n", ret);
+
+ return ret;
+}
+
+static int hclge_rx_priv_wl_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_rx_priv_wl_buf *req;
+ struct hclge_priv_buf *priv;
+ struct hclge_desc desc[2];
+ int i, j;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_RX_PRIV_WL_ALLOC,
+ false);
+ req = (struct hclge_rx_priv_wl_buf *)desc[i].data;
+
+		/* The first descriptor sets the NEXT bit to 1 */
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
+ u32 idx = i * HCLGE_TC_NUM_ONE_DESC + j;
+
+ priv = &buf_alloc->priv_buf[idx];
+ req->tc_wl[j].high =
+ cpu_to_le16(priv->wl.high >> HCLGE_BUF_UNIT_S);
+ req->tc_wl[j].high |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ req->tc_wl[j].low =
+ cpu_to_le16(priv->wl.low >> HCLGE_BUF_UNIT_S);
+ req->tc_wl[j].low |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ }
+ }
+
+	/* Send 2 descriptors at one time */
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "rx private waterline config cmd failed %d\n",
+ ret);
+ return ret;
+}
+
+static int hclge_common_thrd_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_shared_buf *s_buf = &buf_alloc->s_buf;
+ struct hclge_rx_com_thrd *req;
+ struct hclge_desc desc[2];
+ struct hclge_tc_thrd *tc;
+ int i, j;
+ int ret;
+
+ for (i = 0; i < 2; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i],
+ HCLGE_OPC_RX_COM_THRD_ALLOC, false);
+ req = (struct hclge_rx_com_thrd *)&desc[i].data;
+
+		/* The first descriptor sets the NEXT bit to 1 */
+ if (i == 0)
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ else
+ desc[i].flag &= ~cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ for (j = 0; j < HCLGE_TC_NUM_ONE_DESC; j++) {
+ tc = &s_buf->tc_thrd[i * HCLGE_TC_NUM_ONE_DESC + j];
+
+ req->com_thrd[j].high =
+ cpu_to_le16(tc->high >> HCLGE_BUF_UNIT_S);
+ req->com_thrd[j].high |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ req->com_thrd[j].low =
+ cpu_to_le16(tc->low >> HCLGE_BUF_UNIT_S);
+ req->com_thrd[j].low |=
+ cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+ }
+ }
+
+ /* Send 2 descriptors at one time */
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "common threshold config cmd failed %d\n", ret);
+ return ret;
+}
+
+static int hclge_common_wl_config(struct hclge_dev *hdev,
+ struct hclge_pkt_buf_alloc *buf_alloc)
+{
+ struct hclge_shared_buf *buf = &buf_alloc->s_buf;
+ struct hclge_rx_com_wl *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, false);
+
+ req = (struct hclge_rx_com_wl *)desc.data;
+ req->com_wl.high = cpu_to_le16(buf->self.high >> HCLGE_BUF_UNIT_S);
+ req->com_wl.high |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+
+ req->com_wl.low = cpu_to_le16(buf->self.low >> HCLGE_BUF_UNIT_S);
+ req->com_wl.low |= cpu_to_le16(BIT(HCLGE_RX_PRIV_EN_B));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "common waterline config cmd failed %d\n", ret);
+
+ return ret;
+}
+
+int hclge_buffer_alloc(struct hclge_dev *hdev)
+{
+ struct hclge_pkt_buf_alloc *pkt_buf;
+ int ret;
+
+ pkt_buf = kzalloc(sizeof(*pkt_buf), GFP_KERNEL);
+ if (!pkt_buf)
+ return -ENOMEM;
+
+ ret = hclge_tx_buffer_calc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not calc tx buffer size for all TCs %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_tx_buffer_alloc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not alloc tx buffers %d\n", ret);
+ goto out;
+ }
+
+ ret = hclge_rx_buffer_calc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not calc rx priv buffer size for all TCs %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = hclge_rx_priv_buf_alloc(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "could not alloc rx priv buffer %d\n",
+ ret);
+ goto out;
+ }
+
+ if (hnae3_dev_dcb_supported(hdev)) {
+ ret = hclge_rx_priv_wl_config(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure rx private waterline %d\n",
+ ret);
+ goto out;
+ }
+
+ ret = hclge_common_thrd_config(hdev, pkt_buf);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "could not configure common threshold %d\n",
+ ret);
+ goto out;
+ }
+ }
+
+ ret = hclge_common_wl_config(hdev, pkt_buf);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "could not configure common waterline %d\n", ret);
+
+out:
+ kfree(pkt_buf);
+ return ret;
+}
+
+static int hclge_init_roce_base_info(struct hclge_vport *vport)
+{
+ struct hnae3_handle *roce = &vport->roce;
+ struct hnae3_handle *nic = &vport->nic;
+
+ roce->rinfo.num_vectors = vport->back->num_roce_msi;
+
+ if (vport->back->num_msi_left < vport->roce.rinfo.num_vectors ||
+ vport->back->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector = vport->back->roce_base_vector;
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = vport->back->hw.io_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
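+/* Allocate MSI/MSI-X vectors, at least HNAE3_MIN_VECTOR_NUM and at most
+ * hdev->num_msi, and set up the vector_status/vector_irq bookkeeping arrays.
+ */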
+static int hclge_init_msi(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+
+ hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = hdev->base_msi_vector +
+ hdev->roce_base_msix_offset;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGE_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static u8 hclge_check_speed_dup(u8 duplex, int speed)
+{
+ if (!(speed == HCLGE_MAC_SPEED_10M || speed == HCLGE_MAC_SPEED_100M))
+ duplex = HCLGE_MAC_FULL;
+
+ return duplex;
+}
+
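+/* Program the MAC speed and duplex through HCLGE_OPC_CONFIG_SPEED_DUP, using
+ * the same firmware speed codes that hclge_parse_speed() decodes.
+ */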
+static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed,
+ u8 duplex)
+{
+ struct hclge_config_mac_speed_dup_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_config_mac_speed_dup_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_SPEED_DUP, false);
+
+ if (duplex)
+ hnae3_set_bit(req->speed_dup, HCLGE_CFG_DUPLEX_B, 1);
+
+ switch (speed) {
+ case HCLGE_MAC_SPEED_10M:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 6);
+ break;
+ case HCLGE_MAC_SPEED_100M:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 7);
+ break;
+ case HCLGE_MAC_SPEED_1G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 0);
+ break;
+ case HCLGE_MAC_SPEED_10G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 1);
+ break;
+ case HCLGE_MAC_SPEED_25G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 2);
+ break;
+ case HCLGE_MAC_SPEED_40G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 3);
+ break;
+ case HCLGE_MAC_SPEED_50G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 4);
+ break;
+ case HCLGE_MAC_SPEED_100G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 5);
+ break;
+ case HCLGE_MAC_SPEED_200G:
+ hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M,
+ HCLGE_CFG_SPEED_S, 8);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed);
+ return -EINVAL;
+ }
+
+ hnae3_set_bit(req->mac_change_fec_en, HCLGE_CFG_MAC_SPEED_CHANGE_EN_B,
+ 1);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac speed/duplex config cmd failed %d.\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ duplex = hclge_check_speed_dup(duplex, speed);
+ if (!mac->support_autoneg && mac->speed == speed &&
+ mac->duplex == duplex)
+ return 0;
+
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, speed, duplex);
+ if (ret)
+ return ret;
+
+ hdev->hw.mac.speed = speed;
+ hdev->hw.mac.duplex = duplex;
+
+ return 0;
+}
+
+static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed,
+ u8 duplex)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+}
+
+static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable)
+{
+ struct hclge_config_auto_neg_cmd *req;
+ struct hclge_desc desc;
+ u32 flag = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_AN_MODE, false);
+
+ req = (struct hclge_config_auto_neg_cmd *)desc.data;
+ if (enable)
+ hnae3_set_bit(flag, HCLGE_MAC_CFG_AN_EN_B, 1U);
+ req->cfg_an_cmd_flag = cpu_to_le32(flag);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "auto neg set cmd failed %d.\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_set_autoneg(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hdev->hw.mac.support_autoneg) {
+ if (enable) {
+ dev_err(&hdev->pdev->dev,
+ "autoneg is not supported by current port\n");
+ return -EOPNOTSUPP;
+ } else {
+ return 0;
+ }
+ }
+
+ return hclge_set_autoneg_en(hdev, enable);
+}
+
+static int hclge_get_autoneg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (phydev)
+ return phydev->autoneg;
+
+ return hdev->hw.mac.autoneg;
+}
+
+static int hclge_restart_autoneg(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ dev_dbg(&hdev->pdev->dev, "restart autoneg\n");
+
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+ return hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclge_halt_autoneg(struct hnae3_handle *handle, bool halt)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->hw.mac.support_autoneg && hdev->hw.mac.autoneg)
+ return hclge_set_autoneg_en(hdev, !halt);
+
+ return 0;
+}
+
+static int hclge_set_fec_hw(struct hclge_dev *hdev, u32 fec_mode)
+{
+ struct hclge_config_fec_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_FEC_MODE, false);
+
+ req = (struct hclge_config_fec_cmd *)desc.data;
+ if (fec_mode & BIT(HNAE3_FEC_AUTO))
+ hnae3_set_bit(req->fec_mode, HCLGE_MAC_CFG_FEC_AUTO_EN_B, 1);
+ if (fec_mode & BIT(HNAE3_FEC_RS))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_RS);
+ if (fec_mode & BIT(HNAE3_FEC_BASER))
+ hnae3_set_field(req->fec_mode, HCLGE_MAC_CFG_FEC_MODE_M,
+ HCLGE_MAC_CFG_FEC_MODE_S, HCLGE_MAC_FEC_BASER);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fec mode failed %d.\n", ret);
+
+ return ret;
+}
+
+static int hclge_set_fec(struct hnae3_handle *handle, u32 fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ if (fec_mode && !(mac->fec_ability & fec_mode)) {
+ dev_err(&hdev->pdev->dev, "unsupported fec mode\n");
+ return -EINVAL;
+ }
+
+ ret = hclge_set_fec_hw(hdev, fec_mode);
+ if (ret)
+ return ret;
+
+ mac->user_fec_mode = fec_mode | BIT(HNAE3_FEC_USER_DEF);
+ return 0;
+}
+
+static void hclge_get_fec(struct hnae3_handle *handle, u8 *fec_ability,
+ u8 *fec_mode)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ if (fec_ability)
+ *fec_ability = mac->fec_ability;
+ if (fec_mode)
+ *fec_mode = mac->fec_mode;
+}
+
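+/* Initial MAC bring-up: configure speed/duplex, autoneg and any user-selected
+ * FEC mode, set the MTU and default loopback, then allocate the packet
+ * buffers.
+ */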
+static int hclge_mac_init(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int ret;
+
+ hdev->support_sfp_query = true;
+ hdev->hw.mac.duplex = HCLGE_MAC_FULL;
+ ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed,
+ hdev->hw.mac.duplex);
+ if (ret)
+ return ret;
+
+ if (hdev->hw.mac.support_autoneg) {
+ ret = hclge_set_autoneg_en(hdev, hdev->hw.mac.autoneg);
+ if (ret)
+ return ret;
+ }
+
+ mac->link = 0;
+
+ if (mac->user_fec_mode & BIT(HNAE3_FEC_USER_DEF)) {
+ ret = hclge_set_fec_hw(hdev, mac->user_fec_mode);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_set_mac_mtu(hdev, hdev->mps);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_set_default_loopback(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "allocate buffer fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
+}
+
+static void hclge_reset_task_schedule(struct hclge_dev *hdev)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task, 0);
+}
+
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
+{
+ if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work_on(cpumask_first(&hdev->affinity_mask),
+ hclge_wq, &hdev->service_task,
+ delay_time);
+}
+
+static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
+{
+ struct hclge_link_status_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_STATUS, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get link status cmd failed %d\n",
+ ret);
+ return ret;
+ }
+
+ req = (struct hclge_link_status_cmd *)desc.data;
+ *link_status = (req->status & HCLGE_LINK_STATUS_UP_M) > 0 ?
+ HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ return 0;
+}
+
+static int hclge_get_mac_phy_link(struct hclge_dev *hdev, int *link_status)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ *link_status = HCLGE_LINK_STATUS_DOWN;
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state))
+ return 0;
+
+ if (phydev && (phydev->state != PHY_RUNNING || !phydev->link))
+ return 0;
+
+ return hclge_get_mac_link_status(hdev, link_status);
+}
+
+static void hclge_update_link_status(struct hclge_dev *hdev)
+{
+ struct hnae3_client *rclient = hdev->roce_client;
+ struct hnae3_client *client = hdev->nic_client;
+ struct hnae3_handle *rhandle;
+ struct hnae3_handle *handle;
+ int state;
+ int ret;
+ int i;
+
+ if (!client)
+ return;
+
+ if (test_and_set_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
+ ret = hclge_get_mac_phy_link(hdev, &state);
+ if (ret) {
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+ return;
+ }
+
+ if (state != hdev->hw.mac.link) {
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ handle = &hdev->vport[i].nic;
+ client->ops->link_status_change(handle, state);
+ hclge_config_mac_tnl_int(hdev, state);
+ rhandle = &hdev->vport[i].roce;
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle,
+ state);
+ }
+ hdev->hw.mac.link = state;
+ }
+
+ clear_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state);
+}
+
+static void hclge_update_port_capability(struct hclge_mac *mac)
+{
+ /* update fec ability by speed */
+ hclge_convert_setting_fec(mac);
+
+	/* firmware cannot identify the backplane type; the media type
+	 * read from the configuration can help deal with it
+	 */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_BACKPLANE &&
+ mac->module_type == HNAE3_MODULE_TYPE_UNKNOWN)
+ mac->module_type = HNAE3_MODULE_TYPE_KR;
+ else if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
+ mac->module_type = HNAE3_MODULE_TYPE_TP;
+
+ if (mac->support_autoneg) {
+ linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mac->supported);
+ linkmode_copy(mac->advertising, mac->supported);
+ } else {
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT,
+ mac->supported);
+ linkmode_zero(mac->advertising);
+ }
+}
+
+static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed)
+{
+ struct hclge_sfp_info_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP do not support get SFP speed %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret);
+ return ret;
+ }
+
+ *speed = le32_to_cpu(resp->speed);
+
+ return 0;
+}
+
+static int hclge_get_sfp_info(struct hclge_dev *hdev, struct hclge_mac *mac)
+{
+ struct hclge_sfp_info_cmd *resp;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_INFO, true);
+ resp = (struct hclge_sfp_info_cmd *)desc.data;
+
+ resp->query_type = QUERY_ACTIVE_SPEED;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "IMP does not support get SFP info %d\n", ret);
+ return ret;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "get sfp info failed %d\n", ret);
+ return ret;
+ }
+
+	/* In some cases, the MAC speed got from IMP may be 0; it shouldn't
+	 * be set to mac->speed.
+	 */
+ if (!le32_to_cpu(resp->speed))
+ return 0;
+
+ mac->speed = le32_to_cpu(resp->speed);
+	/* if resp->speed_ability is 0, it means the firmware is an old
+	 * version, so do not update these params
+	 */
+ if (resp->speed_ability) {
+ mac->module_type = le32_to_cpu(resp->module_type);
+ mac->speed_ability = le32_to_cpu(resp->speed_ability);
+ mac->autoneg = resp->autoneg;
+ mac->support_autoneg = resp->autoneg_ability;
+ mac->speed_type = QUERY_ACTIVE_SPEED;
+ if (!resp->active_fec)
+ mac->fec_mode = 0;
+ else
+ mac->fec_mode = BIT(resp->active_fec);
+ } else {
+ mac->speed_type = QUERY_SFP_SPEED;
+ }
+
+ return 0;
+}
+
+static int hclge_update_port_info(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+ int speed = HCLGE_MAC_SPEED_UNKNOWN;
+ int ret;
+
+ /* get the port info from SFP cmd if not copper port */
+ if (mac->media_type == HNAE3_MEDIA_TYPE_COPPER)
+ return 0;
+
+	/* if IMP does not support getting SFP/qSFP info, return directly */
+ if (!hdev->support_sfp_query)
+ return 0;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ ret = hclge_get_sfp_info(hdev, mac);
+ else
+ ret = hclge_get_sfp_speed(hdev, &speed);
+
+ if (ret == -EOPNOTSUPP) {
+ hdev->support_sfp_query = false;
+ return ret;
+ } else if (ret) {
+ return ret;
+ }
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ if (mac->speed_type == QUERY_ACTIVE_SPEED) {
+ hclge_update_port_capability(mac);
+ return 0;
+ }
+ return hclge_cfg_mac_speed_dup(hdev, mac->speed,
+ HCLGE_MAC_FULL);
+ } else {
+ if (speed == HCLGE_MAC_SPEED_UNKNOWN)
+ return 0; /* do nothing if no SFP */
+
+		/* must configure full duplex for SFP */
+ return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL);
+ }
+}
+
+static int hclge_get_status(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ hclge_update_link_status(hdev);
+
+ return hdev->hw.mac.link;
+}
+
+static struct hclge_vport *hclge_get_vf_vport(struct hclge_dev *hdev, int vf)
+{
+ if (!pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "SRIOV is disabled, can not get vport(%d) info.\n", vf);
+ return NULL;
+ }
+
+ if (vf < 0 || vf >= pci_num_vf(hdev->pdev)) {
+ dev_err(&hdev->pdev->dev,
+ "vf id(%d) is out of range(0 <= vfid < %d)\n",
+ vf, pci_num_vf(hdev->pdev));
+ return NULL;
+ }
+
+	/* VFs start from 1 in vport */
+ vf += HCLGE_VF_VPORT_START_NUM;
+ return &hdev->vport[vf];
+}
+
+static int hclge_get_vf_config(struct hnae3_handle *handle, int vf,
+ struct ifla_vf_info *ivf)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ ivf->vf = vf;
+ ivf->linkstate = vport->vf_info.link_state;
+ ivf->spoofchk = vport->vf_info.spoofchk;
+ ivf->trusted = vport->vf_info.trusted;
+ ivf->min_tx_rate = 0;
+ ivf->max_tx_rate = vport->vf_info.max_tx_rate;
+ ivf->vlan = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ ivf->vlan_proto = htons(vport->port_base_vlan_cfg.vlan_info.vlan_proto);
+ ivf->qos = vport->port_base_vlan_cfg.vlan_info.qos;
+ ether_addr_copy(ivf->mac, vport->vf_info.mac);
+
+ return 0;
+}
+
+static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf,
+ int link_state)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ vport->vf_info.link_state = link_state;
+
+ return 0;
+}
+
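+/* Decode the vector0 interrupt cause in priority order: IMP reset, global
+ * reset, MSI-X error, then mailbox (CMDQ RX); return the event type and set
+ * *clearval for hclge_clear_event_cause().
+ */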
+static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval)
+{
+ u32 cmdq_src_reg, msix_src_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG);
+ msix_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS);
+
+	/* Assumption: if by any chance reset and mailbox events are reported
+	 * together then we will only process the reset event in this go and
+	 * will defer the processing of the mailbox events. Since we would not
+	 * have cleared the RX CMDQ event this time, we would receive another
+	 * interrupt from H/W just for the mailbox.
+	 *
+	 * check for vector0 reset event sources
+	 */
+ if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & msix_src_reg) {
+ dev_info(&hdev->pdev->dev, "IMP reset interrupt\n");
+ set_bit(HNAE3_IMP_RESET, &hdev->reset_pending);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ hdev->rst_stats.imp_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & msix_src_reg) {
+ dev_info(&hdev->pdev->dev, "global reset interrupt\n");
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending);
+ *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ hdev->rst_stats.global_rst_cnt++;
+ return HCLGE_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 msix event source */
+ if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) {
+ *clearval = msix_src_reg;
+ return HCLGE_VECTOR0_EVENT_ERR;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) {
+ cmdq_src_reg &= ~BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B);
+ *clearval = cmdq_src_reg;
+ return HCLGE_VECTOR0_EVENT_MBX;
+ }
+
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "CMDQ INT status:0x%x, other INT status:0x%x\n",
+ cmdq_src_reg, msix_src_reg);
+ *clearval = msix_src_reg;
+
+ return HCLGE_VECTOR0_EVENT_OTHER;
+}
+
+static void hclge_clear_event_cause(struct hclge_dev *hdev, u32 event_type,
+ u32 regclr)
+{
+#define HCLGE_IMP_RESET_DELAY 5
+
+ switch (event_type) {
+ case HCLGE_VECTOR0_EVENT_RST:
+ if (regclr == BIT(HCLGE_VECTOR0_IMPRESET_INT_B))
+ mdelay(HCLGE_IMP_RESET_DELAY);
+
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG, regclr);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+ hclge_write_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG, regclr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_clear_all_event_cause(struct hclge_dev *hdev)
+{
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_RST,
+ BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) |
+ BIT(HCLGE_VECTOR0_CORERESET_INT_B) |
+ BIT(HCLGE_VECTOR0_IMPRESET_INT_B));
+ hclge_clear_event_cause(hdev, HCLGE_VECTOR0_EVENT_MBX, 0);
+}
+
+static void hclge_enable_vector(struct hclge_misc_vector *vector, bool enable)
+{
+ writel(enable ? 1 : 0, vector->addr);
+}
+
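+/* Misc (vector 0) interrupt handler. The vector is disabled on entry and
+ * re-enabled here only for mailbox events or when there is nothing to clear;
+ * for reset and error events it is re-enabled later, once the cause has been
+ * handled.
+ */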
+static irqreturn_t hclge_misc_irq_handle(int irq, void *data)
+{
+ struct hclge_dev *hdev = data;
+ u32 clearval = 0;
+ u32 event_cause;
+
+ hclge_enable_vector(&hdev->misc_vector, false);
+ event_cause = hclge_check_event_cause(hdev, &clearval);
+
+	/* vector 0 interrupt is shared with reset and mailbox source events. */
+ switch (event_cause) {
+ case HCLGE_VECTOR0_EVENT_ERR:
+		/* We do not know what type of reset is required yet. That can
+		 * only be decided after we fetch the types of errors which
+		 * caused this event. Therefore, for now:
+		 * 1. Assert the HNAE3_UNKNOWN_RESET type, which means the
+		 *    choice of reset to be used is deferred.
+		 * 2. Schedule the reset service task.
+		 * 3. When the service task receives the HNAE3_UNKNOWN_RESET
+		 *    type, it will fetch the correct type of reset by first
+		 *    decoding the types of errors.
+		 */
+ set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request);
+ fallthrough;
+ case HCLGE_VECTOR0_EVENT_RST:
+ hclge_reset_task_schedule(hdev);
+ break;
+ case HCLGE_VECTOR0_EVENT_MBX:
+		/* If we are here then either:
+		 * 1. we are not handling any mbx task and none is scheduled,
+		 *    or
+		 * 2. we are handling an mbx task but nothing more is
+		 *    scheduled.
+		 * In both cases, schedule the mbx task, as this interrupt
+		 * reports more mbx messages.
+		 */
+ hclge_mbx_task_schedule(hdev);
+ break;
+ default:
+ dev_warn(&hdev->pdev->dev,
+ "received unknown or unhandled event of vector0\n");
+ break;
+ }
+
+ hclge_clear_event_cause(hdev, event_cause, clearval);
+
+	/* Enable the interrupt if the event was not caused by a reset.
+	 * When clearval is 0, the interrupt status may have been cleared
+	 * by hardware before the driver read the status register; in
+	 * that case, the vector0 interrupt should also be re-enabled.
+	 */
+ if (!clearval ||
+ event_cause == HCLGE_VECTOR0_EVENT_MBX) {
+ hclge_enable_vector(&hdev->misc_vector, true);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void hclge_free_vector(struct hclge_dev *hdev, int vector_id)
+{
+ if (hdev->vector_status[vector_id] == HCLGE_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
+ hdev->vector_status[vector_id] = HCLGE_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static void hclge_get_misc_vector(struct hclge_dev *hdev)
+{
+ struct hclge_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev, 0);
+
+ vector->addr = hdev->hw.io_base + HCLGE_MISC_VECTOR_REG_BASE;
+ hdev->vector_status[0] = 0;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
+ affinity_notify);
+
+ cpumask_copy(&hdev->affinity_mask, mask);
+}
+
+static void hclge_irq_affinity_release(struct kref *ref)
+{
+}
+
+static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
+{
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq,
+ &hdev->affinity_mask);
+
+ hdev->affinity_notify.notify = hclge_irq_affinity_notify;
+ hdev->affinity_notify.release = hclge_irq_affinity_release;
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
+ &hdev->affinity_notify);
+}
+
+static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
+{
+ irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
+ irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
+}
+
+static int hclge_misc_irq_init(struct hclge_dev *hdev)
+{
+ int ret;
+
+ hclge_get_misc_vector(hdev);
+
+	/* this IRQ is explicitly freed in hclge_misc_irq_uninit() */
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGE_NAME, pci_name(hdev->pdev));
+ ret = request_irq(hdev->misc_vector.vector_irq, hclge_misc_irq_handle,
+ 0, hdev->misc_vector.name, hdev);
+ if (ret) {
+ hclge_free_vector(hdev, 0);
+ dev_err(&hdev->pdev->dev, "request misc irq(%d) fail\n",
+ hdev->misc_vector.vector_irq);
+ }
+
+ return ret;
+}
+
+static void hclge_misc_irq_uninit(struct hclge_dev *hdev)
+{
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclge_free_vector(hdev, 0);
+}
+
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].nic;
+ int ret;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify nic client failed %d(%d)\n", type, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_notify_roce_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ int ret;
+ u16 i;
+
+ if (!test_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ struct hnae3_handle *handle = &hdev->vport[i].roce;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
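+/* Poll the reset status register for the current reset type every 100 ms,
+ * up to 350 times, until the hardware clears the reset-in-progress bit.
+ */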
+static int hclge_reset_wait(struct hclge_dev *hdev)
+{
+#define HCLGE_RESET_WAIT_MS	100
+#define HCLGE_RESET_WAIT_CNT 350
+
+ u32 val, reg, reg_bit;
+ u32 cnt = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_IMP_RESET_BIT;
+ break;
+ case HNAE3_GLOBAL_RESET:
+ reg = HCLGE_GLOBAL_RESET_REG;
+ reg_bit = HCLGE_GLOBAL_RESET_BIT;
+ break;
+ case HNAE3_FUNC_RESET:
+ reg = HCLGE_FUN_RST_ING;
+ reg_bit = HCLGE_FUN_RST_ING_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Wait for unsupported reset type: %d\n",
+ hdev->reset_type);
+ return -EINVAL;
+ }
+
+ val = hclge_read_dev(&hdev->hw, reg);
+ while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) {
+		msleep(HCLGE_RESET_WAIT_MS);
+ val = hclge_read_dev(&hdev->hw, reg);
+ cnt++;
+ }
+
+ if (cnt >= HCLGE_RESET_WAIT_CNT) {
+ dev_warn(&hdev->pdev->dev,
+ "Wait for reset timeout: %d\n", hdev->reset_type);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset)
+{
+ struct hclge_vf_rst_cmd *req;
+ struct hclge_desc desc;
+
+ req = (struct hclge_vf_rst_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false);
+ req->dest_vfid = func_id;
+
+ if (reset)
+ req->vf_rst = 0x1;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
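+/* Set or clear the FUNC_RST_ING flag for every VF. VF vports are stored
+ * after the PF/VMDq vports in hdev->vport, hence the loop starts at
+ * num_vmdq_vport + 1. When asserting the reset, alive VFs are also informed
+ * through the mailbox so they can stop IO.
+ */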
+static int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset)
+{
+ int i;
+
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to set/clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, reset);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set vf(%u) rst failed %d!\n",
+ vport->vport_id, ret);
+ return ret;
+ }
+
+ if (!reset || !test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ continue;
+
+ /* Inform VF to process the reset.
+ * hclge_inform_reset_assert_to_vf may fail if VF
+ * driver is not loaded.
+ */
+ ret = hclge_inform_reset_assert_to_vf(vport);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "inform reset to vf(%u) failed %d!\n",
+ vport->vport_id, ret);
+ }
+
+ return 0;
+}
+
+static void hclge_mailbox_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state) ||
+ test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state) ||
+ test_and_set_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclge_mbx_handler(hdev);
+
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
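+/* Wait for all VFs to report that they have stopped IO before a function
+ * level reset. The mailbox is serviced between polls so VFs can bring their
+ * netdevs down; old firmware that does not support the query command gets a
+ * fixed delay instead.
+ */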
+static void hclge_func_reset_sync_vf(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_sync_cmd *req;
+ struct hclge_desc desc;
+ int cnt = 0;
+ int ret;
+
+ req = (struct hclge_pf_rst_sync_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_VF_RST_RDY, true);
+
+ do {
+		/* the VF needs to bring its netdev down via mbx during PF or FLR reset */
+ hclge_mailbox_service_task(hdev);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+		/* for compatibility with old firmware, wait
+		 * 100 ms for the VF to stop IO
+		 */
+ if (ret == -EOPNOTSUPP) {
+ msleep(HCLGE_RESET_SYNC_TIME);
+ return;
+ } else if (ret) {
+ dev_warn(&hdev->pdev->dev, "sync with VF fail %d!\n",
+ ret);
+ return;
+ } else if (req->all_vf_ready) {
+ return;
+ }
+ msleep(HCLGE_PF_RESET_SYNC_TIME);
+ hclge_cmd_reuse_desc(&desc, true);
+ } while (cnt++ < HCLGE_PF_RESET_SYNC_CNT);
+
+ dev_warn(&hdev->pdev->dev, "sync with VF timeout!\n");
+}
+
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ u16 i;
+
+ if (!client || !client->ops->process_hw_error ||
+ !test_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state))
+ return;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++)
+ client->ops->process_hw_error(&hdev->vport[i].nic, type);
+}
+
+static void hclge_handle_imp_error(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_RD_POISON_B)) {
+ hclge_report_hw_error(hdev, HNAE3_IMP_RD_POISON_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_RD_POISON_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+
+ if (reg_val & BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B)) {
+ hclge_report_hw_error(hdev, HNAE3_CMDQ_ECC_ERROR);
+ reg_val &= ~BIT(HCLGE_VECTOR0_IMP_CMDQ_ERR_B);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, reg_val);
+ }
+}
+
+int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id)
+{
+ struct hclge_desc desc;
+ struct hclge_reset_cmd *req = (struct hclge_reset_cmd *)desc.data;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_RST_TRIGGER, false);
+ hnae3_set_bit(req->mac_func_reset, HCLGE_CFG_RESET_FUNC_B, 1);
+ req->fun_reset_vfid = func_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "send function reset cmd fail, status =%d\n", ret);
+
+ return ret;
+}
+
+static void hclge_do_reset(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct pci_dev *pdev = hdev->pdev;
+ u32 val;
+
+ if (hclge_get_hw_reset_stat(handle)) {
+ dev_info(&pdev->dev, "hardware reset not finish\n");
+ dev_info(&pdev->dev, "func_rst_reg:0x%x, global_rst_reg:0x%x\n",
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING),
+ hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG));
+ return;
+ }
+
+ switch (hdev->reset_type) {
+ case HNAE3_GLOBAL_RESET:
+ dev_info(&pdev->dev, "global reset requested\n");
+ val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG);
+ hnae3_set_bit(val, HCLGE_GLOBAL_RESET_BIT, 1);
+ hclge_write_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG, val);
+ break;
+ case HNAE3_FUNC_RESET:
+ dev_info(&pdev->dev, "PF reset requested\n");
+ /* schedule again to check later */
+ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending);
+ hclge_reset_task_schedule(hdev);
+ break;
+ default:
+ dev_warn(&pdev->dev,
+ "unsupported reset type: %d\n", hdev->reset_type);
+ break;
+ }
+}
+
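+/* Return the highest priority reset level pending in @addr and clear the
+ * bits it supersedes (IMP > global > func > FLR). An unknown reset request
+ * is first resolved into a concrete level by decoding the MSI-X error
+ * sources.
+ */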
+static enum hnae3_reset_type hclge_get_reset_level(struct hnae3_ae_dev *ae_dev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ /* first, resolve any unknown reset type to the known type(s) */
+ if (test_bit(HNAE3_UNKNOWN_RESET, addr)) {
+ u32 msix_sts_reg = hclge_read_dev(&hdev->hw,
+ HCLGE_MISC_VECTOR_INT_STS);
+ /* we will intentionally ignore any errors from this function
+ * as we will end up in *some* reset request in any case
+ */
+ if (hclge_handle_hw_msix_error(hdev, addr))
+ dev_info(&hdev->pdev->dev, "received msix interrupt 0x%x\n",
+ msix_sts_reg);
+
+ clear_bit(HNAE3_UNKNOWN_RESET, addr);
+		/* We deferred clearing the error event which caused the
+		 * interrupt, since it was not possible to do that in
+		 * interrupt context (this is the reason we introduced the
+		 * UNKNOWN reset type). Now that the errors have been
+		 * handled and cleared in hardware, we can safely enable
+		 * interrupts. This is an exception to the norm.
+		 */
+ hclge_enable_vector(&hdev->misc_vector, true);
+ }
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_IMP_RESET, addr)) {
+ rst_level = HNAE3_IMP_RESET;
+ clear_bit(HNAE3_IMP_RESET, addr);
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) {
+ rst_level = HNAE3_GLOBAL_RESET;
+ clear_bit(HNAE3_GLOBAL_RESET, addr);
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FUNC_RESET, addr)) {
+ rst_level = HNAE3_FUNC_RESET;
+ clear_bit(HNAE3_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
+
+ if (hdev->reset_type != HNAE3_NONE_RESET &&
+ rst_level < hdev->reset_type)
+ return HNAE3_NONE_RESET;
+
+ return rst_level;
+}
+
+static void hclge_clear_reset_cause(struct hclge_dev *hdev)
+{
+ u32 clearval = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_IMP_RESET:
+ clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B);
+ break;
+ default:
+ break;
+ }
+
+ if (!clearval)
+ return;
+
+	/* For revision 0x20, the reset interrupt source
+	 * can only be cleared after the hardware reset is done
+	 */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ hclge_write_dev(&hdev->hw, HCLGE_MISC_RESET_STS_REG,
+ clearval);
+
+ hclge_enable_vector(&hdev->misc_vector, true);
+}
+
+static void hclge_reset_handshake(struct hclge_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGE_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGE_NIC_SW_RST_RDY;
+
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CSQ_DEPTH_REG, reg_val);
+}
+
+static int hclge_func_reset_notify_vf(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_all_vf_rst(hdev, true);
+ if (ret)
+ return ret;
+
+ hclge_func_reset_sync_vf(hdev);
+
+ return 0;
+}
+
+static int hclge_reset_prepare_wait(struct hclge_dev *hdev)
+{
+ u32 reg_val;
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ ret = hclge_func_reset_notify_vf(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_func_reset_cmd(hdev, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "asserting function reset fail %d!\n", ret);
+ return ret;
+ }
+
+		/* After performing the PF reset, it is not necessary to do
+		 * mailbox handling or send any command to firmware, because
+		 * any mailbox handling or command to firmware is only valid
+		 * after hclge_cmd_init is called.
+		 */
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.pf_rst_cnt++;
+ break;
+ case HNAE3_FLR_RESET:
+ ret = hclge_func_reset_notify_vf(hdev);
+ if (ret)
+ return ret;
+ break;
+ case HNAE3_IMP_RESET:
+ hclge_handle_imp_error(hdev);
+ reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG);
+ hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG,
+ BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val);
+ break;
+ default:
+ break;
+ }
+
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGE_RESET_SYNC_TIME);
+ hclge_reset_handshake(hdev, true);
+ dev_info(&hdev->pdev->dev, "prepare wait ok\n");
+
+ return ret;
+}
+
+static bool hclge_reset_err_handle(struct hclge_dev *hdev)
+{
+#define MAX_RESET_FAIL_CNT 5
+
+ if (hdev->reset_pending) {
+ dev_info(&hdev->pdev->dev, "Reset pending %lu\n",
+ hdev->reset_pending);
+ return true;
+ } else if (hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS) &
+ HCLGE_RESET_INT_M) {
+ dev_info(&hdev->pdev->dev,
+ "reset failed because new reset interrupt\n");
+ hclge_clear_reset_cause(hdev);
+ return false;
+ } else if (hdev->rst_stats.reset_fail_cnt < MAX_RESET_FAIL_CNT) {
+ hdev->rst_stats.reset_fail_cnt++;
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+ dev_info(&hdev->pdev->dev,
+ "re-schedule reset task(%u)\n",
+ hdev->rst_stats.reset_fail_cnt);
+ return true;
+ }
+
+ hclge_clear_reset_cause(hdev);
+
+ /* recover the handshake status when reset fail */
+ hclge_reset_handshake(hdev, true);
+
+ dev_err(&hdev->pdev->dev, "Reset fail!\n");
+
+ hclge_dbg_dump_rst_info(hdev);
+
+ set_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
+ return false;
+}
+
+static int hclge_set_rst_done(struct hclge_dev *hdev)
+{
+ struct hclge_pf_rst_done_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_pf_rst_done_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PF_RST_DONE, false);
+ req->pf_rst_done |= HCLGE_PF_RESET_DONE_BIT;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* To be compatible with the old firmware, which does not support
+ * command HCLGE_OPC_PF_RST_DONE, just print a warning and
+ * return success
+ */
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "current firmware does not support command(0x%x)!\n",
+ HCLGE_OPC_PF_RST_DONE);
+ return 0;
+ } else if (ret) {
+ dev_err(&hdev->pdev->dev, "assert PF reset done fail %d!\n",
+ ret);
+ }
+
+ return ret;
+}
+
+static int hclge_reset_prepare_up(struct hclge_dev *hdev)
+{
+ int ret = 0;
+
+ switch (hdev->reset_type) {
+ case HNAE3_FUNC_RESET:
+ case HNAE3_FLR_RESET:
+ ret = hclge_set_all_vf_rst(hdev, false);
+ break;
+ case HNAE3_GLOBAL_RESET:
+ case HNAE3_IMP_RESET:
+ ret = hclge_set_rst_done(hdev);
+ break;
+ default:
+ break;
+ }
+
+ /* clear up the handshake status after re-initialize done */
+ hclge_reset_handshake(hdev, false);
+
+ return ret;
+}
+
+static int hclge_reset_stack(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ ret = hclge_reset_ae_dev(hdev->ae_dev);
+ if (ret)
+ return ret;
+
+ return hclge_notify_client(hdev, HNAE3_INIT_CLIENT);
+}
+
+static int hclge_reset_prepare(struct hclge_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.reset_cnt++;
+ /* perform reset of the stack & ae device for a client */
+ ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ return hclge_reset_prepare_wait(hdev);
+}
+
+static int hclge_reset_rebuild(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ enum hnae3_reset_type reset_level;
+ int ret;
+
+ hdev->rst_stats.hw_reset_done_cnt++;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = hclge_reset_stack(hdev);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ hclge_clear_reset_cause(hdev);
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+	/* ignore the RoCE notify error only if the reset has already failed
+	 * HCLGE_RESET_MAX_FAIL_CNT - 1 times
+	 */
+ if (ret &&
+ hdev->rst_stats.reset_fail_cnt < HCLGE_RESET_MAX_FAIL_CNT - 1)
+ return ret;
+
+ ret = hclge_reset_prepare_up(hdev);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ return ret;
+
+ hdev->last_reset_time = jiffies;
+ hdev->rst_stats.reset_fail_cnt = 0;
+ hdev->rst_stats.reset_done_cnt++;
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+
+	/* if default_reset_request holds a higher level reset request,
+	 * it should be handled as soon as possible, since some errors
+	 * need this kind of reset to be fixed.
+	 */
+ reset_level = hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ if (reset_level != HNAE3_NONE_RESET)
+ set_bit(reset_level, &hdev->reset_request);
+
+ return 0;
+}
+
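+/* Perform a reset in three phases: prepare (notify clients down and assert
+ * the reset), wait for the hardware to finish, and rebuild (re-initialize
+ * the ae device and bring the clients back up). On failure, the error
+ * handler decides whether the reset task should be re-scheduled.
+ */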
+static void hclge_reset(struct hclge_dev *hdev)
+{
+ if (hclge_reset_prepare(hdev))
+ goto err_reset;
+
+ if (hclge_reset_wait(hdev))
+ goto err_reset;
+
+ if (hclge_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
+
+err_reset:
+ if (hclge_reset_err_handle(hdev))
+ hclge_reset_task_schedule(hdev);
+}
+
+static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclge_dev *hdev = ae_dev->priv;
+
+	/* We might end up getting called broadly because of 2 cases:
+	 * 1. A recoverable error was conveyed through APEI and the only way
+	 *    to bring back normalcy is to reset.
+	 * 2. A new reset request from the stack due to a timeout.
+	 *
+	 * For the first case, the error event might not have an ae handle
+	 * available. Check whether this is a new reset request and we are not
+	 * here just because the last reset attempt did not succeed and the
+	 * watchdog hit us again. We know this if the last reset request did
+	 * not occur very recently (watchdog timer = 5*HZ, so check after a
+	 * sufficiently large time, say 4*5*HZ). For a new request, the
+	 * "reset level" is reset to PF reset. If it is a repeat of the most
+	 * recent reset request, throttle it: do not allow it again before
+	 * HCLGE_RESET_INTERVAL has elapsed.
+	 */
+ if (!handle)
+ handle = &hdev->vport[0].nic;
+
+ if (time_before(jiffies, (hdev->last_reset_time +
+ HCLGE_RESET_INTERVAL))) {
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ return;
+ } else if (hdev->default_reset_request) {
+ hdev->reset_level =
+ hclge_get_reset_level(ae_dev,
+ &hdev->default_reset_request);
+ } else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) {
+ hdev->reset_level = HNAE3_FUNC_RESET;
+ }
+
+ dev_info(&hdev->pdev->dev, "received reset event, reset type is %d\n",
+ hdev->reset_level);
+
+ /* request reset & schedule reset task */
+ set_bit(hdev->reset_level, &hdev->reset_request);
+ hclge_reset_task_schedule(hdev);
+
+ if (hdev->reset_level < HNAE3_GLOBAL_RESET)
+ hdev->reset_level++;
+}
+
+static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclge_reset_timer(struct timer_list *t)
+{
+ struct hclge_dev *hdev = from_timer(hdev, t, reset_timer);
+
+	/* if default_reset_request has no value, it means that this reset
+	 * request has already been handled, so just return here
+	 */
+ if (!hdev->default_reset_request)
+ return;
+
+ dev_info(&hdev->pdev->dev,
+ "triggering reset in reset timer\n");
+ hclge_reset_event(hdev->pdev, NULL);
+}
+
+static void hclge_reset_subtask(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+	/* check if there is any ongoing reset in the hardware. This status can
+	 * be checked from reset_pending. If there is, we need to wait for the
+	 * hardware to complete the reset:
+	 *    a. If we are able to figure out in reasonable time that the
+	 *       hardware has fully reset, we can proceed with the driver and
+	 *       client reset.
+	 *    b. Otherwise, we can come back later to check this status, so
+	 *       re-schedule now.
+	 */
+ hdev->last_reset_time = jiffies;
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_reset(hdev);
+
+ /* check if we got any *new* reset requests to be honored */
+ hdev->reset_type = hclge_get_reset_level(ae_dev, &hdev->reset_request);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclge_do_reset(hdev);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+}
+
+static void hclge_reset_service_task(struct hclge_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
+ return;
+
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+
+ hclge_reset_subtask(hdev);
+
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static void hclge_update_vport_alive(struct hclge_dev *hdev)
+{
+ int i;
+
+	/* start from vport 1 since the PF (vport 0) is always alive */
+ for (i = 1; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ))
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+
+ /* If vf is not alive, set to default value */
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state))
+ vport->mps = HCLGE_MAC_DEFAULT_FRAME;
+ }
+}
+
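+/* Periodic service task. Link, MAC table and promiscuous mode are synced on
+ * every invocation, while the remaining work is rate limited to roughly once
+ * per second based on last_serv_processed.
+ */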
+static void hclge_periodic_service_task(struct hclge_dev *hdev)
+{
+ unsigned long delta = round_jiffies_relative(HZ);
+
+ if (test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
+ return;
+
+ /* Always handle the link updating to make sure link state is
+ * updated when it is triggered by mbx.
+ */
+ hclge_update_link_status(hdev);
+ hclge_sync_mac_table(hdev);
+ hclge_sync_promisc_mode(hdev);
+
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
+ }
+
+ hdev->serv_processed_cnt++;
+ hclge_update_vport_alive(hdev);
+
+ if (test_bit(HCLGE_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGE_STATS_TIMER_INTERVAL))
+ hclge_update_stats_for_all(hdev);
+
+ hclge_update_port_info(hdev);
+ hclge_sync_vlan_filter(hdev);
+
+ if (!(hdev->serv_processed_cnt % HCLGE_ARFS_EXPIRE_INTERVAL))
+ hclge_rfs_filter_expire(hdev);
+
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclge_task_schedule(hdev, delta);
+}
+
+static void hclge_service_task(struct work_struct *work)
+{
+ struct hclge_dev *hdev =
+ container_of(work, struct hclge_dev, service_task.work);
+
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+ hclge_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case periodical task delays the
+ * handling by calling hclge_task_schedule() in
+ * hclge_periodic_service_task().
+ */
+ hclge_reset_service_task(hdev);
+ hclge_mailbox_service_task(hdev);
+}
+
+struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle)
+{
+ /* VF handle has no client */
+ if (!handle->client)
+ return container_of(handle, struct hclge_vport, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclge_vport, roce);
+ else
+ return container_of(handle, struct hclge_vport, nic);
+}
+
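+/* Allocate up to @vector_num unused MSI-X vectors for the vport. Vector 0
+ * is reserved for the misc interrupt, so the search starts from index 1.
+ * Returns the number of vectors actually allocated, which may be less than
+ * requested.
+ */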
+static int hclge_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ struct hclge_dev *hdev = vport->back;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min_t(u16, hdev->num_nic_msi - 1, vector_num);
+ vector_num = min(hdev->num_msi_left, vector_num);
+
+ for (j = 0; j < vector_num; j++) {
+ for (i = 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGE_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.io_base +
+ HCLGE_VECTOR_REG_BASE +
+ (i - 1) * HCLGE_VECTOR_REG_OFFSET +
+ vport->vport_id *
+ HCLGE_VECTOR_VF_OFFSET;
+ hdev->vector_status[i] = vport->vport_id;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclge_get_vector_index(struct hclge_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int hclge_put_vector(struct hnae3_handle *handle, int vector)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int vector_id;
+
+ vector_id = hclge_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&hdev->pdev->dev,
+ "Get vector index fail. vector = %d\n", vector);
+ return vector_id;
+ }
+
+ hclge_free_vector(hdev, vector_id);
+
+ return 0;
+}
+
+static u32 hclge_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGE_RSS_KEY_SIZE;
+}
+
+static u32 hclge_get_rss_indir_size(struct hnae3_handle *handle)
+{
+ return HCLGE_RSS_IND_TBL_SIZE;
+}
+
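+/* Write the RSS hash algorithm and hash key to hardware. The key is longer
+ * than a single descriptor can carry, so it is sent in chunks of
+ * HCLGE_RSS_HASH_KEY_NUM bytes, with key_offset selecting the chunk.
+ */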
+static int hclge_set_rss_algo_key(struct hclge_dev *hdev,
+ const u8 hfunc, const u8 *key)
+{
+ struct hclge_rss_config_cmd *req;
+ unsigned int key_offset = 0;
+ struct hclge_desc desc;
+ int key_counts;
+ int key_size;
+ int ret;
+
+ key_counts = HCLGE_RSS_KEY_SIZE;
+ req = (struct hclge_rss_config_cmd *)desc.data;
+
+ while (key_counts) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGE_RSS_HASH_ALGO_MASK);
+ req->hash_config |= (key_offset << HCLGE_RSS_HASH_KEY_OFFSET_B);
+
+ key_size = min(HCLGE_RSS_HASH_KEY_NUM, key_counts);
+ memcpy(req->hash_key,
+ key + key_offset * HCLGE_RSS_HASH_KEY_NUM, key_size);
+
+ key_counts -= key_size;
+ key_offset++;
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Configure RSS config fail, status = %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int hclge_set_rss_indir_table(struct hclge_dev *hdev, const u8 *indir)
+{
+ struct hclge_rss_indirection_table_cmd *req;
+ struct hclge_desc desc;
+ int i, j;
+ int ret;
+
+ req = (struct hclge_rss_indirection_table_cmd *)desc.data;
+
+ for (i = 0; i < HCLGE_RSS_CFG_TBL_NUM; i++) {
+ hclge_cmd_setup_basic_desc
+ (&desc, HCLGE_OPC_RSS_INDIR_TABLE, false);
+
+ req->start_table_index =
+ cpu_to_le16(i * HCLGE_RSS_CFG_TBL_SIZE);
+ req->rss_set_bitmap = cpu_to_le16(HCLGE_RSS_SET_BITMAP_MSK);
+
+ for (j = 0; j < HCLGE_RSS_CFG_TBL_SIZE; j++)
+ req->rss_result[j] =
+ indir[i * HCLGE_RSS_CFG_TBL_SIZE + j];
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss indir table fail,status = %d\n",
+ ret);
+ return ret;
+ }
+ }
+ return 0;
+}
+
+static int hclge_set_rss_tc_mode(struct hclge_dev *hdev, u16 *tc_valid,
+ u16 *tc_size, u16 *tc_offset)
+{
+ struct hclge_rss_tc_mode_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+ int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_TC_MODE, false);
+ req = (struct hclge_rss_tc_mode_cmd *)desc.data;
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ u16 mode = 0;
+
+ hnae3_set_bit(mode, HCLGE_RSS_TC_VALID_B, (tc_valid[i] & 0x1));
+ hnae3_set_field(mode, HCLGE_RSS_TC_SIZE_M,
+ HCLGE_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(mode, HCLGE_RSS_TC_OFFSET_M,
+ HCLGE_RSS_TC_OFFSET_S, tc_offset[i]);
+
+ req->rss_tc_mode[i] = cpu_to_le16(mode);
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc mode fail, status = %d\n", ret);
+
+ return ret;
+}
+
+static void hclge_get_rss_type(struct hclge_vport *vport)
+{
+ if (vport->rss_tuple_sets.ipv4_tcp_en ||
+ vport->rss_tuple_sets.ipv4_udp_en ||
+ vport->rss_tuple_sets.ipv4_sctp_en ||
+ vport->rss_tuple_sets.ipv6_tcp_en ||
+ vport->rss_tuple_sets.ipv6_udp_en ||
+ vport->rss_tuple_sets.ipv6_sctp_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L4;
+ else if (vport->rss_tuple_sets.ipv4_fragment_en ||
+ vport->rss_tuple_sets.ipv6_fragment_en)
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_L3;
+ else
+ vport->nic.kinfo.rss_type = PKT_HASH_TYPE_NONE;
+}
+
+static int hclge_set_rss_input_tuple(struct hclge_dev *hdev)
+{
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+
+ /* Get the tuple cfg from pf */
+ req->ipv4_tcp_en = hdev->vport[0].rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = hdev->vport[0].rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = hdev->vport[0].rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = hdev->vport[0].rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = hdev->vport[0].rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = hdev->vport[0].rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = hdev->vport[0].rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = hdev->vport[0].rss_tuple_sets.ipv6_fragment_en;
+ hclge_get_rss_type(&hdev->vport[0]);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Configure rss input fail, status = %d\n", ret);
+ return ret;
+}
+
+static int hclge_get_rss(struct hnae3_handle *handle, u32 *indir,
+ u8 *key, u8 *hfunc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ int i;
+
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (vport->rss_algo) {
+ case HCLGE_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGE_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Get the RSS Key required by the user */
+ if (key)
+ memcpy(key, vport->rss_hash_key, HCLGE_RSS_KEY_SIZE);
+
+ /* Get indirect table */
+ if (indir)
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ indir[i] = vport->rss_indirection_tbl[i];
+
+ return 0;
+}
+
+static int hclge_parse_rss_hfunc(struct hclge_vport *vport, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = vport->rss_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hclge_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u8 hash_algo;
+ int ret, i;
+
+ ret = hclge_parse_rss_hfunc(vport, hfunc, &hash_algo);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
+	/* Set the RSS hash key if specified by the user */
+ if (key) {
+ ret = hclge_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret)
+ return ret;
+
+		/* Update the shadow RSS key with the user specified key */
+ memcpy(vport->rss_hash_key, key, HCLGE_RSS_KEY_SIZE);
+ } else {
+ ret = hclge_set_rss_algo_key(hdev, hash_algo,
+ vport->rss_hash_key);
+ if (ret)
+ return ret;
+ }
+ vport->rss_algo = hash_algo;
+
+ /* Update the shadow RSS table with user specified qids */
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ vport->rss_indirection_tbl[i] = indir[i];
+
+ /* Update the hardware */
+ return hclge_set_rss_indir_table(hdev, vport->rss_indirection_tbl);
+}
+
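+/* Translate the ethtool RXH_* flags in nfc->data into the driver's RSS
+ * tuple bits. SCTP flows additionally enable HCLGE_V_TAG_BIT.
+ */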
+static u8 hclge_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGE_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGE_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGE_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGE_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGE_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGE_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGE_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+static int hclge_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_rss_input_tuple_cmd *req;
+ struct hclge_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclge_rss_input_tuple_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RSS_INPUT_TUPLE, false);
+
+ req->ipv4_tcp_en = vport->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = vport->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = vport->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = vport->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = vport->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = vport->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = vport->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = vport->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclge_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGE_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ vport->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ vport->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ vport->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ vport->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ vport->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ vport->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ vport->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ vport->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ hclge_get_rss_type(vport);
+ return 0;
+}
+
+static int hclge_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ u8 tuple_sets;
+
+ nfc->data = 0;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = vport->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = vport->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = vport->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = vport->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = vport->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = vport->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGE_S_IP_BIT | HCLGE_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGE_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGE_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGE_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGE_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
+static int hclge_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->rss_size_max;
+}
+
+int hclge_rss_init_hw(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u8 *rss_indir = vport[0].rss_indirection_tbl;
+ u16 rss_size = vport[0].alloc_rss_size;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u8 *key = vport[0].rss_hash_key;
+ u8 hfunc = vport[0].rss_algo;
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ unsigned int i;
+ int ret;
+
+ ret = hclge_set_rss_indir_table(hdev, rss_indir);
+ if (ret)
+ return ret;
+
+ ret = hclge_set_rss_algo_key(hdev, hfunc, key);
+ if (ret)
+ return ret;
+
+ ret = hclge_set_rss_input_tuple(hdev);
+ if (ret)
+ return ret;
+
+	/* Each TC has the same queue size, and the tc_size set to hardware is
+	 * the log2 of the roundup power of two of rss_size; the actual queue
+	 * size is limited by the indirection table.
+	 */
+ if (rss_size > HCLGE_RSS_TC_SIZE_7 || rss_size == 0) {
+ dev_err(&hdev->pdev->dev,
+ "Configure rss tc size failed, invalid TC_SIZE = %u\n",
+ rss_size);
+ return -EINVAL;
+ }
+
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = rss_size * i;
+ }
+
+ return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+}
+
+void hclge_rss_indir_init_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i, j;
+
+ for (j = 0; j < hdev->num_vmdq_vport + 1; j++) {
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ vport[j].rss_indirection_tbl[i] =
+ i % vport[j].alloc_rss_size;
+ }
+}
+
+static void hclge_rss_init_cfg(struct hclge_dev *hdev)
+{
+ int i, rss_algo = HCLGE_RSS_HASH_ALGO_TOEPLITZ;
+ struct hclge_vport *vport = hdev->vport;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ rss_algo = HCLGE_RSS_HASH_ALGO_SIMPLE;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ vport[i].rss_tuple_sets.ipv4_tcp_en =
+ HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport[i].rss_tuple_sets.ipv4_udp_en =
+ HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport[i].rss_tuple_sets.ipv4_sctp_en =
+ HCLGE_RSS_INPUT_TUPLE_SCTP;
+ vport[i].rss_tuple_sets.ipv4_fragment_en =
+ HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport[i].rss_tuple_sets.ipv6_tcp_en =
+ HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport[i].rss_tuple_sets.ipv6_udp_en =
+ HCLGE_RSS_INPUT_TUPLE_OTHER;
+ vport[i].rss_tuple_sets.ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGE_RSS_INPUT_TUPLE_SCTP;
+ vport[i].rss_tuple_sets.ipv6_fragment_en =
+ HCLGE_RSS_INPUT_TUPLE_OTHER;
+
+ vport[i].rss_algo = rss_algo;
+
+ memcpy(vport[i].rss_hash_key, hclge_hash_key,
+ HCLGE_RSS_KEY_SIZE);
+ }
+
+ hclge_rss_indir_init_cfg(hdev);
+}
+
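+/* Map (@en == true) or unmap the rings in @ring_chain to the given vector.
+ * The chain is walked and written out in batches of
+ * HCLGE_VECTOR_ELEMENTS_PER_CMD entries per command, with a final command
+ * for any remainder.
+ */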
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ring_chain_node *node;
+ struct hclge_desc desc;
+ struct hclge_ctrl_vector_chain_cmd *req =
+ (struct hclge_ctrl_vector_chain_cmd *)desc.data;
+ enum hclge_cmd_status status;
+ enum hclge_opcode_type op;
+ u16 tqp_type_and_id;
+ int i;
+
+ op = en ? HCLGE_OPC_ADD_RING_TO_VECTOR : HCLGE_OPC_DEL_RING_TO_VECTOR;
+ hclge_cmd_setup_basic_desc(&desc, op, false);
+ req->int_vector_id = vector_id;
+
+ i = 0;
+ for (node = ring_chain; node; node = node->next) {
+ tqp_type_and_id = le16_to_cpu(req->tqp_type_and_id[i]);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_TYPE_M,
+ HCLGE_INT_TYPE_S,
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B));
+ hnae3_set_field(tqp_type_and_id, HCLGE_TQP_ID_M,
+ HCLGE_TQP_ID_S, node->tqp_index);
+ hnae3_set_field(tqp_type_and_id, HCLGE_INT_GL_IDX_M,
+ HCLGE_INT_GL_IDX_S,
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S));
+ req->tqp_type_and_id[i] = cpu_to_le16(tqp_type_and_id);
+ if (++i >= HCLGE_VECTOR_ELEMENTS_PER_CMD) {
+ req->int_cause_num = HCLGE_VECTOR_ELEMENTS_PER_CMD;
+ req->vfid = vport->vport_id;
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return -EIO;
+ }
+ i = 0;
+
+			hclge_cmd_setup_basic_desc(&desc, op, false);
+ req->int_vector_id = vector_id;
+ }
+ }
+
+ if (i > 0) {
+ req->int_cause_num = i;
+ req->vfid = vport->vport_id;
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n", status);
+ return -EIO;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int vector_id;
+
+ vector_id = hclge_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vector index. vector=%d\n", vector);
+ return vector_id;
+ }
+
+ return hclge_bind_ring_with_vector(vport, vector_id, true, ring_chain);
+}
+
+static int hclge_unmap_ring_frm_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int vector_id, ret;
+
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
+ vector_id = hclge_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, false, ring_chain);
+ if (ret)
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vectorid=%d, ret =%d\n",
+ vector_id, ret);
+
+ return ret;
+}
+
+static int hclge_cmd_set_promisc_mode(struct hclge_dev *hdev,
+ struct hclge_promisc_param *param)
+{
+ struct hclge_promisc_cfg_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PROMISC_MODE, false);
+
+ req = (struct hclge_promisc_cfg_cmd *)desc.data;
+ req->vf_id = param->vf_id;
+
+	/* HCLGE_PROMISC_TX_EN_B and HCLGE_PROMISC_RX_EN_B are not supported on
+	 * pdev revision(0x20); newer revisions support them. Setting these
+	 * two fields will not return an error when the driver sends the
+	 * command to firmware on revision(0x20).
+	 */
+ req->flag = (param->enable << HCLGE_PROMISC_EN_B) |
+ HCLGE_PROMISC_TX_EN_B | HCLGE_PROMISC_RX_EN_B;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to set vport %d promisc mode, ret = %d.\n",
+ param->vf_id, ret);
+
+ return ret;
+}
+
+static void hclge_promisc_param_init(struct hclge_promisc_param *param,
+ bool en_uc, bool en_mc, bool en_bc,
+ int vport_id)
+{
+ if (!param)
+ return;
+
+ memset(param, 0, sizeof(struct hclge_promisc_param));
+ if (en_uc)
+ param->enable = HCLGE_PROMISC_EN_UC;
+ if (en_mc)
+ param->enable |= HCLGE_PROMISC_EN_MC;
+ if (en_bc)
+ param->enable |= HCLGE_PROMISC_EN_BC;
+ param->vf_id = vport_id;
+}
+
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_promisc_param param;
+
+ hclge_promisc_param_init(&param, en_uc_pmc, en_mc_pmc, en_bc_pmc,
+ vport->vport_id);
+ return hclge_cmd_set_promisc_mode(hdev, &param);
+}
+
+static int hclge_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool en_bc_pmc = true;
+
+	/* For devices whose version is below V2, the vlan filter is always
+	 * bypassed when broadcast promisc is enabled. So broadcast promisc
+	 * should be disabled until the user enables promisc mode.
+	 */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ en_bc_pmc = handle->netdev_flags & HNAE3_BPE ? true : false;
+
+ return hclge_set_vport_promisc_mode(vport, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
+}
+
+static void hclge_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
+static int hclge_get_fd_mode(struct hclge_dev *hdev, u8 *fd_mode)
+{
+ struct hclge_get_fd_mode_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_MODE_CTRL, true);
+
+ req = (struct hclge_get_fd_mode_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "get fd mode fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ *fd_mode = req->mode;
+
+ return ret;
+}
+
+static int hclge_get_fd_allocation(struct hclge_dev *hdev,
+ u32 *stage1_entry_num,
+ u32 *stage2_entry_num,
+ u16 *stage1_counter_num,
+ u16 *stage2_counter_num)
+{
+ struct hclge_get_fd_allocation_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_GET_ALLOCATION, true);
+
+ req = (struct hclge_get_fd_allocation_cmd *)desc.data;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "query fd allocation fail, ret=%d\n",
+ ret);
+ return ret;
+ }
+
+ *stage1_entry_num = le32_to_cpu(req->stage1_entry_num);
+ *stage2_entry_num = le32_to_cpu(req->stage2_entry_num);
+ *stage1_counter_num = le16_to_cpu(req->stage1_counter_num);
+ *stage2_counter_num = le16_to_cpu(req->stage2_counter_num);
+
+ return ret;
+}
+
+static int hclge_set_fd_key_config(struct hclge_dev *hdev,
+ enum HCLGE_FD_STAGE stage_num)
+{
+ struct hclge_set_fd_key_config_cmd *req;
+ struct hclge_fd_key_cfg *stage;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_KEY_CONFIG, false);
+
+ req = (struct hclge_set_fd_key_config_cmd *)desc.data;
+ stage = &hdev->fd_cfg.key_cfg[stage_num];
+ req->stage = stage_num;
+ req->key_select = stage->key_sel;
+ req->inner_sipv6_word_en = stage->inner_sipv6_word_en;
+ req->inner_dipv6_word_en = stage->inner_dipv6_word_en;
+ req->outer_sipv6_word_en = stage->outer_sipv6_word_en;
+ req->outer_dipv6_word_en = stage->outer_dipv6_word_en;
+ req->tuple_mask = cpu_to_le32(~stage->tuple_active);
+ req->meta_data_mask = cpu_to_le32(~stage->meta_data_active);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set fd key fail, ret=%d\n", ret);
+
+ return ret;
+}
+
+static int hclge_init_fd_config(struct hclge_dev *hdev)
+{
+#define LOW_2_WORDS 0x03
+ struct hclge_fd_key_cfg *key_cfg;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return 0;
+
+ ret = hclge_get_fd_mode(hdev, &hdev->fd_cfg.fd_mode);
+ if (ret)
+ return ret;
+
+ switch (hdev->fd_cfg.fd_mode) {
+ case HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH;
+ break;
+ case HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1:
+ hdev->fd_cfg.max_key_length = MAX_KEY_LENGTH / 2;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "Unsupported flow director mode %u\n",
+ hdev->fd_cfg.fd_mode);
+ return -EOPNOTSUPP;
+ }
+
+ key_cfg = &hdev->fd_cfg.key_cfg[HCLGE_FD_STAGE_1];
+	key_cfg->key_sel = HCLGE_FD_KEY_BASE_ON_TUPLE;
+ key_cfg->inner_sipv6_word_en = LOW_2_WORDS;
+ key_cfg->inner_dipv6_word_en = LOW_2_WORDS;
+ key_cfg->outer_sipv6_word_en = 0;
+ key_cfg->outer_dipv6_word_en = 0;
+
+ key_cfg->tuple_active = BIT(INNER_VLAN_TAG_FST) | BIT(INNER_ETH_TYPE) |
+ BIT(INNER_IP_PROTO) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+	/* If the max 400-bit key is used, the MAC address tuples can also be supported */
+ if (hdev->fd_cfg.fd_mode == HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1)
+ key_cfg->tuple_active |=
+ BIT(INNER_DST_MAC) | BIT(INNER_SRC_MAC);
+
+ /* roce_type is used to filter roce frames
+ * dst_vport is used to specify the rule
+ */
+ key_cfg->meta_data_active = BIT(ROCE_TYPE) | BIT(DST_VPORT);
+
+ ret = hclge_get_fd_allocation(hdev,
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_2],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_1],
+ &hdev->fd_cfg.cnt_num[HCLGE_FD_STAGE_2]);
+ if (ret)
+ return ret;
+
+ return hclge_set_fd_key_config(hdev, HCLGE_FD_STAGE_1);
+}
+
+static int hclge_fd_tcam_config(struct hclge_dev *hdev, u8 stage, bool sel_x,
+ int loc, u8 *key, bool is_add)
+{
+ struct hclge_fd_tcam_config_1_cmd *req1;
+ struct hclge_fd_tcam_config_2_cmd *req2;
+ struct hclge_fd_tcam_config_3_cmd *req3;
+ struct hclge_desc desc[3];
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, false);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, false);
+
+ req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data;
+ req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data;
+ req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data;
+
+ req1->stage = stage;
+ req1->xy_sel = sel_x ? 1 : 0;
+ hnae3_set_bit(req1->port_info, HCLGE_FD_EPORT_SW_EN_B, 0);
+ req1->index = cpu_to_le32(loc);
+ req1->entry_vld = sel_x ? is_add : 0;
+
+ if (key) {
+ memcpy(req1->tcam_data, &key[0], sizeof(req1->tcam_data));
+ memcpy(req2->tcam_data, &key[sizeof(req1->tcam_data)],
+ sizeof(req2->tcam_data));
+ memcpy(req3->tcam_data, &key[sizeof(req1->tcam_data) +
+ sizeof(req2->tcam_data)], sizeof(req3->tcam_data));
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "config tcam key fail, ret=%d\n",
+ ret);
+
+ return ret;
+}
+
+static int hclge_fd_ad_config(struct hclge_dev *hdev, u8 stage, int loc,
+ struct hclge_fd_ad_data *action)
+{
+ struct hclge_fd_ad_config_cmd *req;
+ struct hclge_desc desc;
+ u64 ad_data = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_FD_AD_OP, false);
+
+ req = (struct hclge_fd_ad_config_cmd *)desc.data;
+ req->index = cpu_to_le32(loc);
+ req->stage = stage;
+
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_WR_RULE_ID_B,
+ action->write_rule_id_to_bd);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_RULE_ID_M, HCLGE_FD_AD_RULE_ID_S,
+ action->rule_id);
+ ad_data <<= 32;
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DROP_B, action->drop_packet);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_DIRECT_QID_B,
+ action->forward_to_direct_queue);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_QID_M, HCLGE_FD_AD_QID_S,
+ action->queue_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_USE_COUNTER_B, action->use_counter);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_COUNTER_NUM_M,
+ HCLGE_FD_AD_COUNTER_NUM_S, action->counter_id);
+ hnae3_set_bit(ad_data, HCLGE_FD_AD_NXT_STEP_B, action->use_next_stage);
+ hnae3_set_field(ad_data, HCLGE_FD_AD_NXT_KEY_M, HCLGE_FD_AD_NXT_KEY_S,
+ action->counter_id);
+
+ req->ad_data = cpu_to_le64(ad_data);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fd ad config fail, ret=%d\n", ret);
+
+ return ret;
+}
+
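+/* Convert one tuple of the rule into the TCAM x/y key format from its value
+ * and mask using calc_x()/calc_y(). Returns true when the tuple occupies
+ * space in the key (even if masked out as unused), so the caller knows to
+ * advance the key pointers; unrecognized tuple bits return false.
+ */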
+static bool hclge_fd_convert_tuple(u32 tuple_bit, u8 *key_x, u8 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u16 tmp_x_s, tmp_y_s;
+ u32 tmp_x_l, tmp_y_l;
+ int i;
+
+ if (rule->unused_tuple & tuple_bit)
+ return true;
+
+ switch (tuple_bit) {
+ case BIT(INNER_DST_MAC):
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.dst_mac[i],
+ rule->tuples_mask.dst_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_SRC_MAC):
+ for (i = 0; i < ETH_ALEN; i++) {
+ calc_x(key_x[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
+ rule->tuples_mask.src_mac[i]);
+ calc_y(key_y[ETH_ALEN - 1 - i], rule->tuples.src_mac[i],
+ rule->tuples_mask.src_mac[i]);
+ }
+
+ return true;
+ case BIT(INNER_VLAN_TAG_FST):
+ calc_x(tmp_x_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ calc_y(tmp_y_s, rule->tuples.vlan_tag1,
+ rule->tuples_mask.vlan_tag1);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_ETH_TYPE):
+ calc_x(tmp_x_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ calc_y(tmp_y_s, rule->tuples.ether_proto,
+ rule->tuples_mask.ether_proto);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_IP_TOS):
+ calc_x(*key_x, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+ calc_y(*key_y, rule->tuples.ip_tos, rule->tuples_mask.ip_tos);
+
+ return true;
+ case BIT(INNER_IP_PROTO):
+ calc_x(*key_x, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+ calc_y(*key_y, rule->tuples.ip_proto,
+ rule->tuples_mask.ip_proto);
+
+ return true;
+ case BIT(INNER_SRC_IP):
+ calc_x(tmp_x_l, rule->tuples.src_ip[IPV4_INDEX],
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
+ calc_y(tmp_y_l, rule->tuples.src_ip[IPV4_INDEX],
+ rule->tuples_mask.src_ip[IPV4_INDEX]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_DST_IP):
+ calc_x(tmp_x_l, rule->tuples.dst_ip[IPV4_INDEX],
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
+ calc_y(tmp_y_l, rule->tuples.dst_ip[IPV4_INDEX],
+ rule->tuples_mask.dst_ip[IPV4_INDEX]);
+ *(__le32 *)key_x = cpu_to_le32(tmp_x_l);
+ *(__le32 *)key_y = cpu_to_le32(tmp_y_l);
+
+ return true;
+ case BIT(INNER_SRC_PORT):
+ calc_x(tmp_x_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ calc_y(tmp_y_s, rule->tuples.src_port,
+ rule->tuples_mask.src_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ case BIT(INNER_DST_PORT):
+ calc_x(tmp_x_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ calc_y(tmp_y_s, rule->tuples.dst_port,
+ rule->tuples_mask.dst_port);
+ *(__le16 *)key_x = cpu_to_le16(tmp_x_s);
+ *(__le16 *)key_y = cpu_to_le16(tmp_y_s);
+
+ return true;
+ default:
+ return false;
+ }
+}
+
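+/* Encode a port identifier: host ports are addressed by pf id and vf id,
+ * network ports by their physical port id.
+ */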
+static u32 hclge_get_port_number(enum HLCGE_PORT_TYPE port_type, u8 pf_id,
+ u8 vf_id, u8 network_port_id)
+{
+ u32 port_number = 0;
+
+ if (port_type == HOST_PORT) {
+ hnae3_set_field(port_number, HCLGE_PF_ID_M, HCLGE_PF_ID_S,
+ pf_id);
+ hnae3_set_field(port_number, HCLGE_VF_ID_M, HCLGE_VF_ID_S,
+ vf_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, HOST_PORT);
+ } else {
+ hnae3_set_field(port_number, HCLGE_NETWORK_PORT_ID_M,
+ HCLGE_NETWORK_PORT_ID_S, network_port_id);
+ hnae3_set_bit(port_number, HCLGE_PORT_TYPE_B, NETWORK_PORT);
+ }
+
+ return port_number;
+}
+
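+/* Pack the active meta data fields (packet type, destination vport) into
+ * the meta data word and convert it into a key_x/key_y pair; the result is
+ * left-aligned so that it lands in the MSB region of the key.
+ */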
+static void hclge_fd_convert_meta_data(struct hclge_fd_key_cfg *key_cfg,
+ __le32 *key_x, __le32 *key_y,
+ struct hclge_fd_rule *rule)
+{
+ u32 tuple_bit, meta_data = 0, tmp_x, tmp_y, port_number;
+ u8 cur_pos = 0, tuple_size, shift_bits;
+ unsigned int i;
+
+ for (i = 0; i < MAX_META_DATA; i++) {
+ tuple_size = meta_data_key_info[i].key_length;
+ tuple_bit = key_cfg->meta_data_active & BIT(i);
+
+ switch (tuple_bit) {
+ case BIT(ROCE_TYPE):
+ hnae3_set_bit(meta_data, cur_pos, NIC_PACKET);
+ cur_pos += tuple_size;
+ break;
+ case BIT(DST_VPORT):
+ port_number = hclge_get_port_number(HOST_PORT, 0,
+ rule->vf_id, 0);
+ hnae3_set_field(meta_data,
+ GENMASK(cur_pos + tuple_size, cur_pos),
+ cur_pos, port_number);
+ cur_pos += tuple_size;
+ break;
+ default:
+ break;
+ }
+ }
+
+ calc_x(tmp_x, meta_data, 0xFFFFFFFF);
+ calc_y(tmp_y, meta_data, 0xFFFFFFFF);
+ shift_bits = sizeof(meta_data) * 8 - cur_pos;
+
+ *key_x = cpu_to_le32(tmp_x << shift_bits);
+ *key_y = cpu_to_le32(tmp_y << shift_bits);
+}
+
+/* A complete key consists of a meta data key and a tuple key.
+ * The meta data key is stored in the MSB region, the tuple key in the
+ * LSB region, and unused bits are filled with 0.
+ */
+static int hclge_config_key(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_key_cfg *key_cfg = &hdev->fd_cfg.key_cfg[stage];
+ u8 key_x[MAX_KEY_BYTES], key_y[MAX_KEY_BYTES];
+ u8 *cur_key_x, *cur_key_y;
+ u8 meta_data_region;
+ u8 tuple_size;
+ int ret;
+ u32 i;
+
+ memset(key_x, 0, sizeof(key_x));
+ memset(key_y, 0, sizeof(key_y));
+ cur_key_x = key_x;
+ cur_key_y = key_y;
+
+ for (i = 0; i < MAX_TUPLE; i++) {
+ bool tuple_valid;
+ u32 check_tuple;
+
+ tuple_size = tuple_key_info[i].key_length / 8;
+ check_tuple = key_cfg->tuple_active & BIT(i);
+
+ tuple_valid = hclge_fd_convert_tuple(check_tuple, cur_key_x,
+ cur_key_y, rule);
+ if (tuple_valid) {
+ cur_key_x += tuple_size;
+ cur_key_y += tuple_size;
+ }
+ }
+
+ meta_data_region = hdev->fd_cfg.max_key_length / 8 -
+ MAX_META_DATA_LENGTH / 8;
+
+ hclge_fd_convert_meta_data(key_cfg,
+ (__le32 *)(key_x + meta_data_region),
+ (__le32 *)(key_y + meta_data_region),
+ rule);
+
+ ret = hclge_fd_tcam_config(hdev, stage, false, rule->location, key_y,
+ true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "fd key_y config fail, loc=%u, ret=%d\n",
+ rule->location, ret);
+ return ret;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, stage, true, rule->location, key_x,
+ true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "fd key_x config fail, loc=%u, ret=%d\n",
+ rule->location, ret);
+ return ret;
+}
+
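+/* Translate the rule action into an AD table entry: either drop matched
+ * packets or forward them to the destination queue, and write the rule id
+ * into the buffer descriptor so that matched packets can be identified.
+ */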
+static int hclge_config_action(struct hclge_dev *hdev, u8 stage,
+ struct hclge_fd_rule *rule)
+{
+ struct hclge_fd_ad_data ad_data;
+
+ ad_data.ad_id = rule->location;
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ ad_data.drop_packet = true;
+ ad_data.forward_to_direct_queue = false;
+ ad_data.queue_id = 0;
+ } else {
+ ad_data.drop_packet = false;
+ ad_data.forward_to_direct_queue = true;
+ ad_data.queue_id = rule->queue_id;
+ }
+
+ ad_data.use_counter = false;
+ ad_data.counter_id = 0;
+
+ ad_data.use_next_stage = false;
+ ad_data.next_input_key = 0;
+
+ ad_data.write_rule_id_to_bd = true;
+ ad_data.rule_id = rule->location;
+
+ return hclge_fd_ad_config(hdev, stage, ad_data.ad_id, &ad_data);
+}
+
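+/* The hclge_fd_check_*_tuple() helpers below validate one flavour of
+ * ethtool flow spec each: fields left as zero are recorded in
+ * *unused_tuple, and fields the flow director cannot match on are
+ * rejected with -EOPNOTSUPP.
+ */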
+static int hclge_fd_check_tcpip4_tuple(struct ethtool_tcpip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC);
+
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
+
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
+
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
+
+ return 0;
+}
+
+static int hclge_fd_check_ip4_tuple(struct ethtool_usrip4_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ if (!spec->ip4src)
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (!spec->ip4dst)
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->tos)
+ *unused_tuple |= BIT(INNER_IP_TOS);
+
+ if (!spec->proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
+
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ if (spec->ip_ver != ETH_RX_NFC_IP4)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int hclge_fd_check_tcpip6_tuple(struct ethtool_tcpip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS);
+
+ /* check whether src/dst ip address used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->psrc)
+ *unused_tuple |= BIT(INNER_SRC_PORT);
+
+ if (!spec->pdst)
+ *unused_tuple |= BIT(INNER_DST_PORT);
+
+ if (spec->tclass)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int hclge_fd_check_ip6_tuple(struct ethtool_usrip6_spec *spec,
+ u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_IP_TOS) | BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT);
+
+ /* check whether src/dst ip address used */
+ if (!spec->ip6src[0] && !spec->ip6src[1] &&
+ !spec->ip6src[2] && !spec->ip6src[3])
+ *unused_tuple |= BIT(INNER_SRC_IP);
+
+ if (!spec->ip6dst[0] && !spec->ip6dst[1] &&
+ !spec->ip6dst[2] && !spec->ip6dst[3])
+ *unused_tuple |= BIT(INNER_DST_IP);
+
+ if (!spec->l4_proto)
+ *unused_tuple |= BIT(INNER_IP_PROTO);
+
+ if (spec->tclass)
+ return -EOPNOTSUPP;
+
+ if (spec->l4_4_bytes)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+static int hclge_fd_check_ether_tuple(struct ethhdr *spec, u32 *unused_tuple)
+{
+ if (!spec || !unused_tuple)
+ return -EINVAL;
+
+ *unused_tuple |= BIT(INNER_SRC_IP) | BIT(INNER_DST_IP) |
+ BIT(INNER_SRC_PORT) | BIT(INNER_DST_PORT) |
+ BIT(INNER_IP_TOS) | BIT(INNER_IP_PROTO);
+
+ if (is_zero_ether_addr(spec->h_source))
+ *unused_tuple |= BIT(INNER_SRC_MAC);
+
+ if (is_zero_ether_addr(spec->h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+
+ if (!spec->h_proto)
+ *unused_tuple |= BIT(INNER_ETH_TYPE);
+
+ return 0;
+}
+
+static int hclge_fd_check_ext_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ if (fs->h_ext.vlan_etype) {
+ dev_err(&hdev->pdev->dev, "vlan-etype is not supported!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!fs->h_ext.vlan_tci)
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+
+ if (fs->m_ext.vlan_tci &&
+ be16_to_cpu(fs->h_ext.vlan_tci) >= VLAN_N_VID) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config vlan_tci, invalid vlan_tci: %u, max is %u.\n",
+ ntohs(fs->h_ext.vlan_tci), VLAN_N_VID - 1);
+ return -EINVAL;
+ }
+ } else {
+ *unused_tuple |= BIT(INNER_VLAN_TAG_FST);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "FLOW_MAC_EXT is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_zero_ether_addr(fs->h_ext.h_dest))
+ *unused_tuple |= BIT(INNER_DST_MAC);
+ else
+ *unused_tuple &= ~BIT(INNER_DST_MAC);
+ }
+
+ return 0;
+}
+
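+/* Validate an ethtool flow spec before it is turned into a rule: check the
+ * rule location, reject user-def data, dispatch to the per-flow-type tuple
+ * check and finally verify the FLOW_EXT/FLOW_MAC_EXT extensions.
+ */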
+static int hclge_fd_check_spec(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ u32 *unused_tuple)
+{
+ u32 flow_type;
+ int ret;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ dev_err(&hdev->pdev->dev,
+ "failed to config fd rules, invalid rule location: %u, max is %u\n.",
+ fs->location,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1] - 1);
+ return -EINVAL;
+ }
+
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->h_ext.data[0] != 0 || fs->h_ext.data[1] != 0)) {
+ dev_err(&hdev->pdev->dev, "user-def bytes are not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ ret = hclge_fd_check_tcpip4_tuple(&fs->h_u.tcp_ip4_spec,
+ unused_tuple);
+ break;
+ case IP_USER_FLOW:
+ ret = hclge_fd_check_ip4_tuple(&fs->h_u.usr_ip4_spec,
+ unused_tuple);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ ret = hclge_fd_check_tcpip6_tuple(&fs->h_u.tcp_ip6_spec,
+ unused_tuple);
+ break;
+ case IPV6_USER_FLOW:
+ ret = hclge_fd_check_ip6_tuple(&fs->h_u.usr_ip6_spec,
+ unused_tuple);
+ break;
+ case ETHER_FLOW:
+ if (hdev->fd_cfg.fd_mode !=
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1) {
+ dev_err(&hdev->pdev->dev,
+ "ETHER_FLOW is not supported in current fd mode!\n");
+ return -EOPNOTSUPP;
+ }
+
+ ret = hclge_fd_check_ether_tuple(&fs->h_u.ether_spec,
+ unused_tuple);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported protocol type, protocol type = %#x\n",
+ flow_type);
+ return -EOPNOTSUPP;
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to check flow union tuple, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_fd_check_ext_tuple(hdev, fs, unused_tuple);
+}
+
+static bool hclge_fd_rule_exist(struct hclge_dev *hdev, u16 location)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node2;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return rule && rule->location == location;
+}
+
+/* must be called with fd_rule_lock held */
+static int hclge_fd_update_rule_list(struct hclge_dev *hdev,
+ struct hclge_fd_rule *new_rule,
+ u16 location,
+ bool is_add)
+{
+ struct hclge_fd_rule *rule = NULL, *parent = NULL;
+ struct hlist_node *node2;
+
+ if (is_add && !new_rule)
+ return -EINVAL;
+
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= location)
+ break;
+ parent = rule;
+ }
+
+ if (rule && rule->location == location) {
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+
+ if (!is_add) {
+ if (!hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ clear_bit(location, hdev->fd_bmap);
+
+ return 0;
+ }
+ } else if (!is_add) {
+ dev_err(&hdev->pdev->dev,
+ "delete fail, rule %u is inexistent\n",
+ location);
+ return -EINVAL;
+ }
+
+ INIT_HLIST_NODE(&new_rule->rule_node);
+
+ if (parent)
+ hlist_add_behind(&new_rule->rule_node, &parent->rule_node);
+ else
+ hlist_add_head(&new_rule->rule_node, &hdev->fd_rule_list);
+
+ set_bit(location, hdev->fd_bmap);
+ hdev->hclge_fd_rule_num++;
+ hdev->fd_active_type = new_rule->rule_type;
+
+ return 0;
+}
+
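+/* Convert the ethtool flow spec into the driver's tuple/mask
+ * representation (host byte order) and fill in the tuples implied by the
+ * flow type, such as ether_proto and ip_proto.
+ */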
+static int hclge_fd_get_tuple(struct hclge_dev *hdev,
+ struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ u32 flow_type = fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.tcp_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.tcp_ip4_spec.ip4dst);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip4_spec.pdst);
+
+ rule->tuples.ip_tos = fs->h_u.tcp_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.tcp_ip4_spec.tos;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IP_USER_FLOW:
+ rule->tuples.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4src);
+ rule->tuples_mask.src_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4src);
+
+ rule->tuples.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->h_u.usr_ip4_spec.ip4dst);
+ rule->tuples_mask.dst_ip[IPV4_INDEX] =
+ be32_to_cpu(fs->m_u.usr_ip4_spec.ip4dst);
+
+ rule->tuples.ip_tos = fs->h_u.usr_ip4_spec.tos;
+ rule->tuples_mask.ip_tos = fs->m_u.usr_ip4_spec.tos;
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip4_spec.proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip4_spec.proto;
+
+ rule->tuples.ether_proto = ETH_P_IP;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.tcp_ip6_spec.ip6src, IPV6_SIZE);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.tcp_ip6_spec.ip6dst, IPV6_SIZE);
+
+ rule->tuples.src_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.psrc);
+ rule->tuples_mask.src_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.psrc);
+
+ rule->tuples.dst_port = be16_to_cpu(fs->h_u.tcp_ip6_spec.pdst);
+ rule->tuples_mask.dst_port =
+ be16_to_cpu(fs->m_u.tcp_ip6_spec.pdst);
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case IPV6_USER_FLOW:
+ be32_to_cpu_array(rule->tuples.src_ip,
+ fs->h_u.usr_ip6_spec.ip6src, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.src_ip,
+ fs->m_u.usr_ip6_spec.ip6src, IPV6_SIZE);
+
+ be32_to_cpu_array(rule->tuples.dst_ip,
+ fs->h_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
+ be32_to_cpu_array(rule->tuples_mask.dst_ip,
+ fs->m_u.usr_ip6_spec.ip6dst, IPV6_SIZE);
+
+ rule->tuples.ip_proto = fs->h_u.usr_ip6_spec.l4_proto;
+ rule->tuples_mask.ip_proto = fs->m_u.usr_ip6_spec.l4_proto;
+
+ rule->tuples.ether_proto = ETH_P_IPV6;
+ rule->tuples_mask.ether_proto = 0xFFFF;
+
+ break;
+ case ETHER_FLOW:
+ ether_addr_copy(rule->tuples.src_mac,
+ fs->h_u.ether_spec.h_source);
+ ether_addr_copy(rule->tuples_mask.src_mac,
+ fs->m_u.ether_spec.h_source);
+
+ ether_addr_copy(rule->tuples.dst_mac,
+ fs->h_u.ether_spec.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac,
+ fs->m_u.ether_spec.h_dest);
+
+ rule->tuples.ether_proto =
+ be16_to_cpu(fs->h_u.ether_spec.h_proto);
+ rule->tuples_mask.ether_proto =
+ be16_to_cpu(fs->m_u.ether_spec.h_proto);
+
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (flow_type) {
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_SCTP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_TCP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ rule->tuples.ip_proto = IPPROTO_UDP;
+ rule->tuples_mask.ip_proto = 0xFF;
+ break;
+ default:
+ break;
+ }
+
+ if (fs->flow_type & FLOW_EXT) {
+ rule->tuples.vlan_tag1 = be16_to_cpu(fs->h_ext.vlan_tci);
+ rule->tuples_mask.vlan_tag1 = be16_to_cpu(fs->m_ext.vlan_tci);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(rule->tuples.dst_mac, fs->h_ext.h_dest);
+ ether_addr_copy(rule->tuples_mask.dst_mac, fs->m_ext.h_dest);
+ }
+
+ return 0;
+}
+
+/* must be called with fd_rule_lock held */
+static int hclge_fd_config_rule(struct hclge_dev *hdev,
+ struct hclge_fd_rule *rule)
+{
+ int ret;
+
+ if (!rule) {
+ dev_err(&hdev->pdev->dev,
+ "The flow director rule is NULL\n");
+ return -EINVAL;
+ }
+
+ /* it never fails here, so there is no need to check the return value */
+ hclge_fd_update_rule_list(hdev, rule, rule->location, true);
+
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret)
+ goto clear_rule;
+
+ return 0;
+
+clear_rule:
+ hclge_fd_update_rule_list(hdev, rule, rule->location, false);
+ return ret;
+}
+
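+/* Add a flow director rule configured through ethtool, e.g. (interface
+ * name and values below are only an example):
+ *   ethtool -N eth0 flow-type tcp4 dst-port 80 action 3
+ * The spec is validated, the ring cookie is resolved to a vport and queue,
+ * and the rule is programmed into the key and action tables.
+ */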
+static int hclge_add_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u16 dst_vport_id = 0, q_index = 0;
+ struct ethtool_rx_flow_spec *fs;
+ struct hclge_fd_rule *rule;
+ u32 unused = 0;
+ u8 action;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev)) {
+ dev_err(&hdev->pdev->dev,
+ "flow table director is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (!hdev->fd_en) {
+ dev_err(&hdev->pdev->dev,
+ "please enable flow director first\n");
+ return -EOPNOTSUPP;
+ }
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ ret = hclge_fd_check_spec(hdev, fs, &unused);
+ if (ret)
+ return ret;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC) {
+ action = HCLGE_FD_ACTION_DROP_PACKET;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fs->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie);
+ u16 tqps;
+
+ if (vf > hdev->num_req_vfs) {
+ dev_err(&hdev->pdev->dev,
+ "Error: vf id (%u) > max vf num (%u)\n",
+ vf, hdev->num_req_vfs);
+ return -EINVAL;
+ }
+
+ dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id;
+ tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps;
+
+ if (ring >= tqps) {
+ dev_err(&hdev->pdev->dev,
+ "Error: queue id (%u) > max tqp num (%u)\n",
+ ring, tqps - 1);
+ return -EINVAL;
+ }
+
+ action = HCLGE_FD_ACTION_ACCEPT_PACKET;
+ q_index = ring;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
+ ret = hclge_fd_get_tuple(hdev, fs, rule);
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->flow_type = fs->flow_type;
+ rule->location = fs->location;
+ rule->unused_tuple = unused;
+ rule->vf_id = dst_vport_id;
+ rule->queue_id = q_index;
+ rule->action = action;
+ rule->rule_type = HCLGE_FD_EP_ACTIVE;
+
+ /* to avoid rule conflicts, clear all arfs rules when the user
+ * configures a rule via ethtool
+ */
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(handle);
+
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return ret;
+}
+
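+/* Delete a flow director rule by location: remove the TCAM entry from
+ * hardware first, then drop the rule from the software list.
+ */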
+static int hclge_del_fd_entry(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fs->location >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ return -EINVAL;
+
+ if (!hclge_fd_rule_exist(hdev, fs->location)) {
+ dev_err(&hdev->pdev->dev,
+ "Delete fail, rule %u is inexistent\n", fs->location);
+ return -ENOENT;
+ }
+
+ ret = hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, fs->location,
+ NULL, false);
+ if (ret)
+ return ret;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ ret = hclge_fd_update_rule_list(hdev, NULL, fs->location, false);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return ret;
+}
+
+/* must be called with fd_rule_lock held */
+static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
+ bool clear_list)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ u16 location;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return;
+
+ for_each_set_bit(location, hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
+ NULL, false);
+
+ if (clear_list) {
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list,
+ rule_node) {
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ }
+ hdev->fd_active_type = HCLGE_FD_RULE_NONE;
+ hdev->hclge_fd_rule_num = 0;
+ bitmap_zero(hdev->fd_bmap,
+ hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
+ }
+}
+
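+/* Restore all flow director rules to hardware after a reset; rules that
+ * fail to restore are dropped from the software list. Always returns 0 so
+ * that the reset itself does not fail because of this step.
+ */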
+static int hclge_restore_fd_entries(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ int ret;
+
+ /* Return ok here, because reset error handling will check this
+ * return value. If error is returned here, the reset process will
+ * fail.
+ */
+ if (!hnae3_dev_fd_supported(hdev))
+ return 0;
+
+ /* if fd is disabled, the rules should not be restored during reset */
+ if (!hdev->fd_en)
+ return 0;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (!ret)
+ ret = hclge_config_key(hdev, HCLGE_FD_STAGE_1, rule);
+
+ if (ret) {
+ dev_warn(&hdev->pdev->dev,
+ "Restore rule %u failed, remove it\n",
+ rule->location);
+ clear_bit(rule->location, hdev->fd_bmap);
+ hlist_del(&rule->rule_node);
+ kfree(rule);
+ hdev->hclge_fd_rule_num--;
+ }
+ }
+
+ if (hdev->hclge_fd_rule_num)
+ hdev->fd_active_type = HCLGE_FD_EP_ACTIVE;
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
+static int hclge_get_fd_rule_cnt(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->rule_cnt = hdev->hclge_fd_rule_num;
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ return 0;
+}
+
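+/* The hclge_fd_get_*_info() helpers below convert a stored rule back into
+ * the ethtool spec format; the mask of an unused tuple is reported as 0.
+ */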
+static void hclge_fd_get_tcpip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip4_spec *spec,
+ struct ethtool_tcpip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+}
+
+static void hclge_fd_get_ip4_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip4_spec *spec,
+ struct ethtool_usrip4_spec *spec_mask)
+{
+ spec->ip4src = cpu_to_be32(rule->tuples.src_ip[IPV4_INDEX]);
+ spec_mask->ip4src = rule->unused_tuple & BIT(INNER_SRC_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.src_ip[IPV4_INDEX]);
+
+ spec->ip4dst = cpu_to_be32(rule->tuples.dst_ip[IPV4_INDEX]);
+ spec_mask->ip4dst = rule->unused_tuple & BIT(INNER_DST_IP) ?
+ 0 : cpu_to_be32(rule->tuples_mask.dst_ip[IPV4_INDEX]);
+
+ spec->tos = rule->tuples.ip_tos;
+ spec_mask->tos = rule->unused_tuple & BIT(INNER_IP_TOS) ?
+ 0 : rule->tuples_mask.ip_tos;
+
+ spec->proto = rule->tuples.ip_proto;
+ spec_mask->proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+
+ spec->ip_ver = ETH_RX_NFC_IP4;
+}
+
+static void hclge_fd_get_tcpip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_tcpip6_spec *spec,
+ struct ethtool_tcpip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src,
+ rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst,
+ rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src, rule->tuples_mask.src_ip,
+ IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst, rule->tuples_mask.dst_ip,
+ IPV6_SIZE);
+
+ spec->psrc = cpu_to_be16(rule->tuples.src_port);
+ spec_mask->psrc = rule->unused_tuple & BIT(INNER_SRC_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.src_port);
+
+ spec->pdst = cpu_to_be16(rule->tuples.dst_port);
+ spec_mask->pdst = rule->unused_tuple & BIT(INNER_DST_PORT) ?
+ 0 : cpu_to_be16(rule->tuples_mask.dst_port);
+}
+
+static void hclge_fd_get_ip6_info(struct hclge_fd_rule *rule,
+ struct ethtool_usrip6_spec *spec,
+ struct ethtool_usrip6_spec *spec_mask)
+{
+ cpu_to_be32_array(spec->ip6src, rule->tuples.src_ip, IPV6_SIZE);
+ cpu_to_be32_array(spec->ip6dst, rule->tuples.dst_ip, IPV6_SIZE);
+ if (rule->unused_tuple & BIT(INNER_SRC_IP))
+ memset(spec_mask->ip6src, 0, sizeof(spec_mask->ip6src));
+ else
+ cpu_to_be32_array(spec_mask->ip6src,
+ rule->tuples_mask.src_ip, IPV6_SIZE);
+
+ if (rule->unused_tuple & BIT(INNER_DST_IP))
+ memset(spec_mask->ip6dst, 0, sizeof(spec_mask->ip6dst));
+ else
+ cpu_to_be32_array(spec_mask->ip6dst,
+ rule->tuples_mask.dst_ip, IPV6_SIZE);
+
+ spec->l4_proto = rule->tuples.ip_proto;
+ spec_mask->l4_proto = rule->unused_tuple & BIT(INNER_IP_PROTO) ?
+ 0 : rule->tuples_mask.ip_proto;
+}
+
+static void hclge_fd_get_ether_info(struct hclge_fd_rule *rule,
+ struct ethhdr *spec,
+ struct ethhdr *spec_mask)
+{
+ ether_addr_copy(spec->h_source, rule->tuples.src_mac);
+ ether_addr_copy(spec->h_dest, rule->tuples.dst_mac);
+
+ if (rule->unused_tuple & BIT(INNER_SRC_MAC))
+ eth_zero_addr(spec_mask->h_source);
+ else
+ ether_addr_copy(spec_mask->h_source, rule->tuples_mask.src_mac);
+
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(spec_mask->h_dest);
+ else
+ ether_addr_copy(spec_mask->h_dest, rule->tuples_mask.dst_mac);
+
+ spec->h_proto = cpu_to_be16(rule->tuples.ether_proto);
+ spec_mask->h_proto = rule->unused_tuple & BIT(INNER_ETH_TYPE) ?
+ 0 : cpu_to_be16(rule->tuples_mask.ether_proto);
+}
+
+static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
+ struct hclge_fd_rule *rule)
+{
+ if (fs->flow_type & FLOW_EXT) {
+ fs->h_ext.vlan_tci = cpu_to_be16(rule->tuples.vlan_tag1);
+ fs->m_ext.vlan_tci =
+ rule->unused_tuple & BIT(INNER_VLAN_TAG_FST) ?
+ 0 : cpu_to_be16(rule->tuples_mask.vlan_tag1);
+ }
+
+ if (fs->flow_type & FLOW_MAC_EXT) {
+ ether_addr_copy(fs->h_ext.h_dest, rule->tuples.dst_mac);
+ if (rule->unused_tuple & BIT(INNER_DST_MAC))
+ eth_zero_addr(fs->m_u.ether_spec.h_dest);
+ else
+ ether_addr_copy(fs->m_u.ether_spec.h_dest,
+ rule->tuples_mask.dst_mac);
+ }
+}
+
+static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule *rule = NULL;
+ struct hclge_dev *hdev = vport->back;
+ struct ethtool_rx_flow_spec *fs;
+ struct hlist_node *node2;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ fs = (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+ if (rule->location >= fs->location)
+ break;
+ }
+
+ if (!rule || fs->location != rule->location) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return -ENOENT;
+ }
+
+ fs->flow_type = rule->flow_type;
+ switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) {
+ case SCTP_V4_FLOW:
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ hclge_fd_get_tcpip4_info(rule, &fs->h_u.tcp_ip4_spec,
+ &fs->m_u.tcp_ip4_spec);
+ break;
+ case IP_USER_FLOW:
+ hclge_fd_get_ip4_info(rule, &fs->h_u.usr_ip4_spec,
+ &fs->m_u.usr_ip4_spec);
+ break;
+ case SCTP_V6_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ hclge_fd_get_tcpip6_info(rule, &fs->h_u.tcp_ip6_spec,
+ &fs->m_u.tcp_ip6_spec);
+ break;
+ case IPV6_USER_FLOW:
+ hclge_fd_get_ip6_info(rule, &fs->h_u.usr_ip6_spec,
+ &fs->m_u.usr_ip6_spec);
+ break;
+ /* The flow type of the fd rule has been checked before it was added
+ * to the rule list. As the other flow types have been handled above,
+ * the default case must be ETHER_FLOW.
+ */
+ default:
+ hclge_fd_get_ether_info(rule, &fs->h_u.ether_spec,
+ &fs->m_u.ether_spec);
+ break;
+ }
+
+ hclge_fd_get_ext_info(fs, rule);
+
+ if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+ fs->ring_cookie = RX_CLS_FLOW_DISC;
+ } else {
+ u64 vf_id;
+
+ fs->ring_cookie = rule->queue_id;
+ vf_id = rule->vf_id;
+ vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+ fs->ring_cookie |= vf_id;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ return 0;
+}
+
+static int hclge_get_all_rules(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node2;
+ int cnt = 0;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ cmd->data = hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1];
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hlist_for_each_entry_safe(rule, node2,
+ &hdev->fd_rule_list, rule_node) {
+ if (cnt == cmd->rule_cnt) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EMSGSIZE;
+ }
+
+ rule_locs[cnt] = rule->location;
+ cnt++;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
+ struct hclge_fd_rule_tuples *tuples)
+{
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
+ tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
+ tuples->ip_proto = fkeys->basic.ip_proto;
+ tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
+
+ if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
+ tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
+ tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
+ } else {
+ int i;
+
+ for (i = 0; i < IPV6_SIZE; i++) {
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+ }
+ }
+}
+
+/* traverse all rules, check whether an existing rule has the same tuples */
+static struct hclge_fd_rule *
+hclge_fd_search_flow_keys(struct hclge_dev *hdev,
+ const struct hclge_fd_rule_tuples *tuples)
+{
+ struct hclge_fd_rule *rule = NULL;
+ struct hlist_node *node;
+
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (!memcmp(tuples, &rule->tuples, sizeof(*tuples)))
+ return rule;
+ }
+
+ return NULL;
+}
+
+static void hclge_fd_build_arfs_rule(const struct hclge_fd_rule_tuples *tuples,
+ struct hclge_fd_rule *rule)
+{
+ rule->unused_tuple = BIT(INNER_SRC_MAC) | BIT(INNER_DST_MAC) |
+ BIT(INNER_VLAN_TAG_FST) | BIT(INNER_IP_TOS) |
+ BIT(INNER_SRC_PORT);
+ rule->action = 0;
+ rule->vf_id = 0;
+ rule->rule_type = HCLGE_FD_ARFS_ACTIVE;
+ if (tuples->ether_proto == ETH_P_IP) {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V4_FLOW;
+ else
+ rule->flow_type = UDP_V4_FLOW;
+ } else {
+ if (tuples->ip_proto == IPPROTO_TCP)
+ rule->flow_type = TCP_V6_FLOW;
+ else
+ rule->flow_type = UDP_V6_FLOW;
+ }
+ memcpy(&rule->tuples, tuples, sizeof(rule->tuples));
+ memset(&rule->tuples_mask, 0xFF, sizeof(rule->tuples_mask));
+}
+
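+/* aRFS entry point: steer an individual flow to the queue handling it.
+ * If no rule with the same tuples exists, allocate a free location from
+ * fd_bmap and program a new rule; if one exists for a different queue,
+ * update its queue id. The rule location is returned as the filter id.
+ */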
+static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
+ u16 flow_id, struct flow_keys *fkeys)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_fd_rule_tuples new_tuples = {};
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_fd_rule *rule;
+ u16 tmp_queue_id;
+ u16 bit_id;
+ int ret;
+
+ if (!hnae3_dev_fd_supported(hdev))
+ return -EOPNOTSUPP;
+
+ /* when there is already an fd rule added by the user,
+ * arfs should not take effect
+ */
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -EOPNOTSUPP;
+ }
+
+ hclge_fd_get_flow_tuples(fkeys, &new_tuples);
+
+ /* check whether a flow director filter already exists for this flow:
+ * if not, create a new filter for it;
+ * if a filter exists with a different queue id, modify the filter;
+ * if a filter exists with the same queue id, do nothing
+ */
+ rule = hclge_fd_search_flow_keys(hdev, &new_tuples);
+ if (!rule) {
+ bit_id = find_first_zero_bit(hdev->fd_bmap, MAX_FD_FILTER_NUM);
+ if (bit_id >= hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOSPC;
+ }
+
+ rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
+ if (!rule) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(bit_id, hdev->fd_bmap);
+ rule->location = bit_id;
+ rule->flow_id = flow_id;
+ rule->queue_id = queue_id;
+ hclge_fd_build_arfs_rule(&new_tuples, rule);
+ ret = hclge_fd_config_rule(hdev, rule);
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (ret)
+ return ret;
+
+ return rule->location;
+ }
+
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ if (rule->queue_id == queue_id)
+ return rule->location;
+
+ tmp_queue_id = rule->queue_id;
+ rule->queue_id = queue_id;
+ ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule);
+ if (ret) {
+ rule->queue_id = tmp_queue_id;
+ return ret;
+ }
+
+ return rule->location;
+}
+
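+/* Expire aRFS filters: rules the stack no longer needs (according to
+ * rps_may_expire_flow()) are moved to a local list under the lock, then
+ * their TCAM entries are removed and the rules freed outside the lock.
+ */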
+static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hclge_fd_rule *rule;
+ struct hlist_node *node;
+ HLIST_HEAD(del_list);
+
+ spin_lock_bh(&hdev->fd_rule_lock);
+ if (hdev->fd_active_type != HCLGE_FD_ARFS_ACTIVE) {
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ return;
+ }
+ hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) {
+ if (rps_may_expire_flow(handle->netdev, rule->queue_id,
+ rule->flow_id, rule->location)) {
+ hlist_del_init(&rule->rule_node);
+ hlist_add_head(&rule->rule_node, &del_list);
+ hdev->hclge_fd_rule_num--;
+ clear_bit(rule->location, hdev->fd_bmap);
+ }
+ }
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ hlist_for_each_entry_safe(rule, node, &del_list, rule_node) {
+ hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true,
+ rule->location, NULL, false);
+ kfree(rule);
+ }
+#endif
+}
+
+/* must be called with fd_rule_lock held */
+static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
+{
+#ifdef CONFIG_RFS_ACCEL
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE)
+ hclge_del_all_fd_entries(handle, true);
+#endif
+}
+
+static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) ||
+ hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING);
+}
+
+static bool hclge_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+}
+
+static bool hclge_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->rst_stats.hw_reset_done_cnt;
+}
+
+static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool clear;
+
+ hdev->fd_en = enable;
+ clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
+
+ if (!enable) {
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_del_all_fd_entries(handle, clear);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+ } else {
+ hclge_restore_fd_entries(handle);
+ }
+}
+
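+/* Enable or disable the MAC: when enabling, turn on TX/RX and the
+ * padding/FCS/oversize-truncate handling; when disabling, clear all of
+ * these bits and wait for the MAC link to report down.
+ */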
+static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
+{
+#define HCLGE_LINK_STATUS_WAIT_CNT 3
+
+ struct hclge_desc desc;
+ struct hclge_config_mac_mode_cmd *req =
+ (struct hclge_config_mac_mode_cmd *)desc.data;
+ u32 loop_en = 0;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, false);
+
+ if (enable) {
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_EN_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_PAD_RX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_FCS_TX_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_FCS_STRIP_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_RX_OVERSIZE_TRUNCATE_B, 1U);
+ hnae3_set_bit(loop_en, HCLGE_MAC_TX_UNDER_MIN_ERR_B, 1U);
+ }
+
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac enable fail, ret =%d.\n", ret);
+ return;
+ }
+
+ if (!enable)
+ hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN,
+ HCLGE_LINK_STATUS_WAIT_CNT);
+}
+
+static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid,
+ u8 switch_param, u8 param_mask)
+{
+ struct hclge_mac_vlan_switch_cmd *req;
+ struct hclge_desc desc;
+ u32 func_id;
+ int ret;
+
+ func_id = hclge_get_port_number(HOST_PORT, 0, vfid, 0);
+ req = (struct hclge_mac_vlan_switch_cmd *)desc.data;
+
+ /* read current config parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_SWITCH_PARAM,
+ true);
+ req->roce_sel = HCLGE_MAC_VLAN_NIC_SEL;
+ req->func_id = cpu_to_le32(func_id);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "read mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->switch_param = (req->switch_param & param_mask) | switch_param;
+ req->param_mask = param_mask;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "set mac vlan switch parameter fail, ret = %d\n", ret);
+ return ret;
+}
+
+static void hclge_phy_link_status_wait(struct hclge_dev *hdev,
+ int link_ret)
+{
+#define HCLGE_PHY_LINK_STATUS_NUM 200
+
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = phy_read_status(phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "phy update link status fail, ret = %d\n", ret);
+ return;
+ }
+
+ if (phydev->link == link_ret)
+ break;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < HCLGE_PHY_LINK_STATUS_NUM);
+}
+
+static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
+ int wait_cnt)
+{
+ int link_status;
+ int i = 0;
+ int ret;
+
+ do {
+ ret = hclge_get_mac_link_status(hdev, &link_status);
+ if (ret)
+ return ret;
+ if (link_status == link_ret)
+ return 0;
+
+ msleep(HCLGE_LINK_STATUS_MS);
+ } while (++i < wait_cnt);
+ return -EBUSY;
+}
+
+static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en,
+ bool is_phy)
+{
+#define HCLGE_MAC_LINK_STATUS_NUM 100
+
+ int link_ret;
+
+ link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN;
+
+ if (is_phy)
+ hclge_phy_link_status_wait(hdev, link_ret);
+
+ return hclge_mac_link_status_wait(hdev, link_ret,
+ HCLGE_MAC_LINK_STATUS_NUM);
+}
+
+static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct hclge_config_mac_mode_cmd *req;
+ struct hclge_desc desc;
+ u32 loop_en;
+ int ret;
+
+ req = (struct hclge_config_mac_mode_cmd *)&desc.data[0];
+ /* 1 Read out the MAC mode config at first */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAC_MODE, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mac loopback get fail, ret =%d.\n", ret);
+ return ret;
+ }
+
+ /* 2 Then setup the loopback flag */
+ loop_en = le32_to_cpu(req->txrx_pad_fcs_loop_en);
+ hnae3_set_bit(loop_en, HCLGE_MAC_APP_LP_B, en ? 1 : 0);
+
+ req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en);
+
+ /* 3 Config mac work mode with loopback flag
+ * and its original configuration parameters
+ */
+ hclge_cmd_reuse_desc(&desc, false);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "mac loopback set fail, ret =%d.\n", ret);
+ return ret;
+}
+
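+/* Configure serdes loopback (serial or parallel inner loopback) and poll
+ * the command result until the firmware reports the operation as done and
+ * successful.
+ */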
+static int hclge_cfg_serdes_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+#define HCLGE_SERDES_RETRY_MS 10
+#define HCLGE_SERDES_RETRY_NUM 100
+
+ struct hclge_serdes_lb_cmd *req;
+ struct hclge_desc desc;
+ int ret, i = 0;
+ u8 loop_mode_b;
+
+ req = (struct hclge_serdes_lb_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK, false);
+
+ switch (loop_mode) {
+ case HNAE3_LOOP_SERIAL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_SERIAL_INNER_LOOP_B;
+ break;
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ loop_mode_b = HCLGE_CMD_SERDES_PARALLEL_INNER_LOOP_B;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "unsupported serdes loopback mode %d\n", loop_mode);
+ return -ENOTSUPP;
+ }
+
+ if (en) {
+ req->enable = loop_mode_b;
+ req->mask = loop_mode_b;
+ } else {
+ req->mask = loop_mode_b;
+ }
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback set fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ do {
+ msleep(HCLGE_SERDES_RETRY_MS);
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SERDES_LOOPBACK,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback get, ret = %d\n", ret);
+ return ret;
+ }
+ } while (++i < HCLGE_SERDES_RETRY_NUM &&
+ !(req->result & HCLGE_CMD_SERDES_DONE_B));
+
+ if (!(req->result & HCLGE_CMD_SERDES_DONE_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set timeout\n");
+ return -EBUSY;
+ } else if (!(req->result & HCLGE_CMD_SERDES_SUCCESS_B)) {
+ dev_err(&hdev->pdev->dev, "serdes loopback set failed in fw\n");
+ return -EIO;
+ }
+ return ret;
+}
+
+static int hclge_set_serdes_loopback(struct hclge_dev *hdev, bool en,
+ enum hnae3_loop loop_mode)
+{
+ int ret;
+
+ ret = hclge_cfg_serdes_loopback(hdev, en, loop_mode);
+ if (ret)
+ return ret;
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "serdes loopback config mac mode timeout\n");
+
+ return ret;
+}
+
+static int hclge_enable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ if (!phydev->suspended) {
+ ret = phy_suspend(phydev);
+ if (ret)
+ return ret;
+ }
+
+ ret = phy_resume(phydev);
+ if (ret)
+ return ret;
+
+ return phy_loopback(phydev, true);
+}
+
+static int hclge_disable_phy_loopback(struct hclge_dev *hdev,
+ struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_loopback(phydev, false);
+ if (ret)
+ return ret;
+
+ return phy_suspend(phydev);
+}
+
+static int hclge_set_phy_loopback(struct hclge_dev *hdev, bool en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int ret;
+
+ if (!phydev)
+ return -ENOTSUPP;
+
+ if (en)
+ ret = hclge_enable_phy_loopback(hdev, phydev);
+ else
+ ret = hclge_disable_phy_loopback(hdev, phydev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "set phy loopback fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ hclge_cfg_mac_mode(hdev, en);
+
+ ret = hclge_mac_phy_link_status_wait(hdev, en, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "phy loopback config mac mode timeout\n");
+
+ return ret;
+}
+
+static int hclge_tqp_enable(struct hclge_dev *hdev, unsigned int tqp_id,
+ int stream_id, bool enable)
+{
+ struct hclge_desc desc;
+ struct hclge_cfg_com_tqp_queue_cmd *req =
+ (struct hclge_cfg_com_tqp_queue_cmd *)desc.data;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_COM_TQP_QUEUE, false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGE_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ if (enable)
+ req->enable |= 1U << HCLGE_TQP_ENABLE_B;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Tqp enable fail, status =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_loopback(struct hnae3_handle *handle,
+ enum hnae3_loop loop_mode, bool en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int i, ret;
+
+ /* Loopback can be enabled in three places: SSU, MAC, and serdes. By
+ * default, SSU loopback is enabled, so if the SMAC and the DMAC are
+ * the same, the packets are looped back in the SSU. If SSU loopback
+ * is disabled, packets can reach MAC even if SMAC is the same as DMAC.
+ */
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ u8 switch_param = en ? 0 : BIT(HCLGE_SWITCH_ALW_LPBK_B);
+
+ ret = hclge_config_switch_param(hdev, PF_VPORT_ID, switch_param,
+ HCLGE_SWITCH_ALW_LPBK_MASK);
+ if (ret)
+ return ret;
+ }
+
+ switch (loop_mode) {
+ case HNAE3_LOOP_APP:
+ ret = hclge_set_app_loopback(hdev, en);
+ break;
+ case HNAE3_LOOP_SERIAL_SERDES:
+ case HNAE3_LOOP_PARALLEL_SERDES:
+ ret = hclge_set_serdes_loopback(hdev, en, loop_mode);
+ break;
+ case HNAE3_LOOP_PHY:
+ ret = hclge_set_phy_loopback(hdev, en);
+ break;
+ default:
+ ret = -ENOTSUPP;
+ dev_err(&hdev->pdev->dev,
+ "loop_mode %d is not supported\n", loop_mode);
+ break;
+ }
+
+ if (ret)
+ return ret;
+
+ kinfo = &vport->nic.kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ ret = hclge_tqp_enable(hdev, i, 0, en);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_set_default_loopback(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_set_app_loopback(hdev, false);
+ if (ret)
+ return ret;
+
+ ret = hclge_cfg_serdes_loopback(hdev, false, HNAE3_LOOP_SERIAL_SERDES);
+ if (ret)
+ return ret;
+
+ return hclge_cfg_serdes_loopback(hdev, false,
+ HNAE3_LOOP_PARALLEL_SERDES);
+}
+
+static void hclge_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo;
+ struct hnae3_queue *queue;
+ struct hclge_tqp *tqp;
+ int i;
+
+ kinfo = &vport->nic.kinfo;
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ queue = handle->kinfo.tqp[i];
+ tqp = container_of(queue, struct hclge_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
+
+static void hclge_flush_link_update(struct hclge_dev *hdev)
+{
+#define HCLGE_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
+ while (test_bit(HCLGE_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGE_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
+static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (enable) {
+ hclge_task_schedule(hdev, 0);
+ } else {
+ /* Set the DOWN flag here to disable link updating */
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclge_flush_link_update(hdev);
+ }
+}
+
+static int hclge_ae_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ /* mac enable */
+ hclge_cfg_mac_mode(hdev, true);
+ clear_bit(HCLGE_STATE_DOWN, &hdev->state);
+ hdev->hw.mac.link = 0;
+
+ /* reset tqp stats */
+ hclge_reset_tqp_stats(handle);
+
+ hclge_mac_start_phy(hdev);
+
+ return 0;
+}
+
+static void hclge_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int i;
+
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ spin_lock_bh(&hdev->fd_rule_lock);
+ hclge_clear_arfs_rules(handle);
+ spin_unlock_bh(&hdev->fd_rule_lock);
+
+ /* If it is not a PF reset or FLR, the firmware will disable the MAC,
+ * so it only needs to stop the phy here.
+ */
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) {
+ hclge_pfc_pause_en_cfg(hdev, HCLGE_PFC_TX_RX_DISABLE,
+ HCLGE_PFC_DISABLE);
+ if (hdev->reset_type != HNAE3_FUNC_RESET &&
+ hdev->reset_type != HNAE3_FLR_RESET) {
+ hclge_mac_stop_phy(hdev);
+ hclge_update_link_status(hdev);
+ return;
+ }
+ }
+
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ hclge_reset_tqp(handle, i);
+
+ hclge_config_mac_tnl_int(hdev, false);
+
+ /* Mac disable */
+ hclge_cfg_mac_mode(hdev, false);
+
+ hclge_mac_stop_phy(hdev);
+
+ /* reset tqp stats */
+ hclge_reset_tqp_stats(handle);
+ hclge_update_link_status(hdev);
+}
+
+int hclge_vport_start(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+ vport->last_active_jiffies = jiffies;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block)) {
+ if (vport->vport_id) {
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ } else {
+ hclge_restore_hw_table(hdev);
+ }
+ }
+
+ clear_bit(vport->vport_id, hdev->vport_config_block);
+
+ return 0;
+}
+
+void hclge_vport_stop(struct hclge_vport *vport)
+{
+ clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state);
+}
+
+static int hclge_client_start(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_vport_start(vport);
+}
+
+static void hclge_client_stop(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ hclge_vport_stop(vport);
+}
+
+static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport,
+ u16 cmdq_resp, u8 resp_code,
+ enum hclge_mac_vlan_tbl_opcode op)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_vlan_cmd_status,status=%u.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ if (op == HCLGE_MAC_VLAN_ADD) {
+ if (!resp_code || resp_code == 1)
+ return 0;
+ else if (resp_code == HCLGE_ADD_UC_OVERFLOW ||
+ resp_code == HCLGE_ADD_MC_OVERFLOW)
+ return -ENOSPC;
+
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
+ } else if (op == HCLGE_MAC_VLAN_REMOVE) {
+ if (!resp_code) {
+ return 0;
+ } else if (resp_code == 1) {
+ dev_dbg(&hdev->pdev->dev,
+ "remove mac addr failed for miss.\n");
+ return -ENOENT;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "remove mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
+ } else if (op == HCLGE_MAC_VLAN_LKUP) {
+ if (!resp_code) {
+ return 0;
+ } else if (resp_code == 1) {
+ dev_dbg(&hdev->pdev->dev,
+ "lookup mac addr failed for miss.\n");
+ return -ENOENT;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "lookup mac addr failed for undefined, code=%u.\n",
+ resp_code);
+ return -EIO;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "unknown opcode for get_mac_vlan_cmd_status, opcode=%d.\n", op);
+
+ return -EINVAL;
+}
+
+static int hclge_update_desc_vfid(struct hclge_desc *desc, int vfid, bool clr)
+{
+#define HCLGE_VF_NUM_IN_FIRST_DESC 192
+
+ unsigned int word_num;
+ unsigned int bit_num;
+
+ if (vfid > 255 || vfid < 0)
+ return -EIO;
+
+ if (vfid >= 0 && vfid < HCLGE_VF_NUM_IN_FIRST_DESC) {
+ word_num = vfid / 32;
+ bit_num = vfid % 32;
+ if (clr)
+ desc[1].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ else
+ desc[1].data[word_num] |= cpu_to_le32(1 << bit_num);
+ } else {
+ word_num = (vfid - HCLGE_VF_NUM_IN_FIRST_DESC) / 32;
+ bit_num = vfid % 32;
+ if (clr)
+ desc[2].data[word_num] &= cpu_to_le32(~(1 << bit_num));
+ else
+ desc[2].data[word_num] |= cpu_to_le32(1 << bit_num);
+ }
+
+ return 0;
+}
+
+static bool hclge_is_all_function_id_zero(struct hclge_desc *desc)
+{
+#define HCLGE_DESC_NUMBER 3
+#define HCLGE_FUNC_NUMBER_PER_DESC 6
+ int i, j;
+
+ for (i = 1; i < HCLGE_DESC_NUMBER; i++)
+ for (j = 0; j < HCLGE_FUNC_NUMBER_PER_DESC; j++)
+ if (desc[i].data[j])
+ return false;
+
+ return true;
+}
+
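+/* Build a MAC VLAN table entry from a MAC address: bytes 0-3 are packed
+ * into mac_addr_hi32 and bytes 4-5 into mac_addr_lo16; for multicast
+ * entries the corresponding entry type bits are set as well.
+ */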
+static void hclge_prepare_mac_addr(struct hclge_mac_vlan_tbl_entry_cmd *new_req,
+ const u8 *addr, bool is_mc)
+{
+ const unsigned char *mac_addr = addr;
+ u32 high_val = mac_addr[2] << 16 | (mac_addr[3] << 24) |
+ (mac_addr[0]) | (mac_addr[1] << 8);
+ u32 low_val = mac_addr[4] | (mac_addr[5] << 8);
+
+ hnae3_set_bit(new_req->flags, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ if (is_mc) {
+ hnae3_set_bit(new_req->entry_type, HCLGE_MAC_VLAN_BIT1_EN_B, 1);
+ hnae3_set_bit(new_req->mc_mac_en, HCLGE_MAC_VLAN_BIT0_EN_B, 1);
+ }
+
+ new_req->mac_addr_hi32 = cpu_to_le32(high_val);
+ new_req->mac_addr_lo16 = cpu_to_le16(low_val & 0xffff);
+}
+
+static int hclge_remove_mac_vlan_tbl(struct hclge_vport *vport,
+ struct hclge_mac_vlan_tbl_entry_cmd *req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_REMOVE, false);
+
+ memcpy(desc.data, req, sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "del mac addr failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
+ HCLGE_MAC_VLAN_REMOVE);
+}
+
+static int hclge_lookup_mac_vlan_tbl(struct hclge_vport *vport,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
+ struct hclge_desc *desc,
+ bool is_mc)
+{
+ struct hclge_dev *hdev = vport->back;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_MAC_VLAN_ADD, true);
+ if (is_mc) {
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ memcpy(desc[0].data,
+ req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ hclge_cmd_setup_basic_desc(&desc[1],
+ HCLGE_OPC_MAC_VLAN_ADD,
+ true);
+ desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_setup_basic_desc(&desc[2],
+ HCLGE_OPC_MAC_VLAN_ADD,
+ true);
+ ret = hclge_cmd_send(&hdev->hw, desc, 3);
+ } else {
+ memcpy(desc[0].data,
+ req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ ret = hclge_cmd_send(&hdev->hw, desc, 1);
+ }
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "lookup mac addr failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ resp_code = (le32_to_cpu(desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc[0].retval);
+
+ return hclge_get_mac_vlan_cmd_status(vport, retval, resp_code,
+ HCLGE_MAC_VLAN_LKUP);
+}
+
+static int hclge_add_mac_vlan_tbl(struct hclge_vport *vport,
+ struct hclge_mac_vlan_tbl_entry_cmd *req,
+ struct hclge_desc *mc_desc)
+{
+ struct hclge_dev *hdev = vport->back;
+ int cfg_status;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ if (!mc_desc) {
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc,
+ HCLGE_OPC_MAC_VLAN_ADD,
+ false);
+ memcpy(desc.data, req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
+ resp_code,
+ HCLGE_MAC_VLAN_ADD);
+ } else {
+ hclge_cmd_reuse_desc(&mc_desc[0], false);
+ mc_desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_reuse_desc(&mc_desc[1], false);
+ mc_desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ hclge_cmd_reuse_desc(&mc_desc[2], false);
+ mc_desc[2].flag &= cpu_to_le16(~HCLGE_CMD_FLAG_NEXT);
+ memcpy(mc_desc[0].data, req,
+ sizeof(struct hclge_mac_vlan_tbl_entry_cmd));
+ ret = hclge_cmd_send(&hdev->hw, mc_desc, 3);
+ resp_code = (le32_to_cpu(mc_desc[0].data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(mc_desc[0].retval);
+
+ cfg_status = hclge_get_mac_vlan_cmd_status(vport, retval,
+ resp_code,
+ HCLGE_MAC_VLAN_ADD);
+ }
+
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac addr failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ return cfg_status;
+}
+
+static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size,
+ u16 *allocated_size)
+{
+ struct hclge_umv_spc_alc_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ req = (struct hclge_umv_spc_alc_cmd *)desc.data;
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_ALLOCATE, false);
+
+ req->space_size = cpu_to_le32(space_size);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to set umv space, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ *allocated_size = le32_to_cpu(desc.data[1]);
+
+ return 0;
+}
+
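+/* Request unicast MAC VLAN (UMV) table space from the firmware and split
+ * the allocated space into a private quota for each vport plus a shared
+ * pool used when a private quota runs out.
+ */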
+static int hclge_init_umv_space(struct hclge_dev *hdev)
+{
+ u16 allocated_size = 0;
+ int ret;
+
+ ret = hclge_set_umv_space(hdev, hdev->wanted_umv_size, &allocated_size);
+ if (ret)
+ return ret;
+
+ if (allocated_size < hdev->wanted_umv_size)
+ dev_warn(&hdev->pdev->dev,
+ "failed to alloc umv space, want %u, get %u\n",
+ hdev->wanted_umv_size, allocated_size);
+
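+ /* A worked example of the split below (illustrative numbers only):
+ * with allocated_size = 258 and num_alloc_vport = 3, the space is
+ * divided into num_alloc_vport + 1 = 4 quotas, so priv_umv_size =
+ * 258 / 4 = 64 private entries per function, and share_umv_size =
+ * 64 + 258 % 4 = 66 entries in the shared pool.
+ */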
+ hdev->max_umv_size = allocated_size;
+ hdev->priv_umv_size = hdev->max_umv_size / (hdev->num_alloc_vport + 1);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+
+ return 0;
+}
+
+static void hclge_reset_umv_space(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ vport->used_umv_num = 0;
+ }
+
+ mutex_lock(&hdev->vport_lock);
+ hdev->share_umv_size = hdev->priv_umv_size +
+ hdev->max_umv_size % (hdev->num_alloc_vport + 1);
+ mutex_unlock(&hdev->vport_lock);
+}
+
+static bool hclge_is_umv_space_full(struct hclge_vport *vport, bool need_lock)
+{
+ struct hclge_dev *hdev = vport->back;
+ bool is_full;
+
+ if (need_lock)
+ mutex_lock(&hdev->vport_lock);
+
+ is_full = (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size == 0);
+
+ if (need_lock)
+ mutex_unlock(&hdev->vport_lock);
+
+ return is_full;
+}
+
+static void hclge_update_umv_space(struct hclge_vport *vport, bool is_free)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (is_free) {
+ if (vport->used_umv_num > hdev->priv_umv_size)
+ hdev->share_umv_size++;
+
+ if (vport->used_umv_num > 0)
+ vport->used_umv_num--;
+ } else {
+ if (vport->used_umv_num >= hdev->priv_umv_size &&
+ hdev->share_umv_size > 0)
+ hdev->share_umv_size--;
+ vport->used_umv_num++;
+ }
+}
+
+static struct hclge_mac_node *hclge_find_mac_node(struct list_head *list,
+ const u8 *mac_addr)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
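+/* State transitions applied by hclge_update_mac_node() below, as read
+ * from the code (rows: current node state, columns: requested state):
+ *
+ *            TO_ADD request   TO_DEL request    ACTIVE request
+ * TO_ADD     keep TO_ADD      free the node     become ACTIVE
+ * TO_DEL     become ACTIVE    keep TO_DEL       keep TO_DEL
+ * ACTIVE     keep ACTIVE      become TO_DEL     keep ACTIVE
+ */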
+static void hclge_update_mac_node(struct hclge_mac_node *mac_node,
+ enum HCLGE_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGE_MAC_TO_ADD:
+ if (mac_node->state == HCLGE_MAC_TO_DEL)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGE_MAC_TO_DEL:
+ if (mac_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list; the mac_node->state won't be
+ * ACTIVE here.
+ */
+ case HCLGE_MAC_ACTIVE:
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ mac_node->state = HCLGE_MAC_ACTIVE;
+
+ break;
+ }
+}
+
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ /* if the mac addr is already in the mac list, no need to add a new
+ * one into it; just check the mac addr state and convert it to a new
+ * state, remove it, or do nothing.
+ */
+ mac_node = hclge_find_mac_node(list, addr);
+ if (mac_node) {
+ hclge_update_mac_node(mac_node, state);
+ spin_unlock_bh(&vport->mac_list_lock);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+ return 0;
+ }
+
+ /* if this address was never added, there is no need to delete it */
+ if (state == HCLGE_MAC_TO_DEL) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ dev_err(&hdev->pdev->dev,
+ "failed to delete address %pM from mac list\n",
+ addr);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&vport->mac_list_lock);
+ return -ENOMEM;
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ return 0;
+}
+
+static int hclge_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_UC,
+ addr);
+}
+
+int hclge_add_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int ret;
+
+ /* mac addr check */
+ if (is_zero_ether_addr(addr) ||
+ is_broadcast_ether_addr(addr) ||
+ is_multicast_ether_addr(addr)) {
+ dev_err(&hdev->pdev->dev,
+ "Set_uc mac err! invalid mac:%pM. is_zero:%d,is_br=%d,is_mul=%d\n",
+ addr, is_zero_ether_addr(addr),
+ is_broadcast_ether_addr(addr),
+ is_multicast_ether_addr(addr));
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+
+ req.egress_port = cpu_to_le16(egress_port);
+
+ hclge_prepare_mac_addr(&req, addr, false);
+
+ /* Look up the mac address in the mac_vlan table, and add
+ * it if the entry does not exist. Duplicate unicast entries
+ * are not allowed in the mac vlan table.
+ */
+ ret = hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false);
+ if (ret == -ENOENT) {
+ mutex_lock(&hdev->vport_lock);
+ if (!hclge_is_umv_space_full(vport, false)) {
+ ret = hclge_add_mac_vlan_tbl(vport, &req, NULL);
+ if (!ret)
+ hclge_update_umv_space(vport, false);
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+ }
+ mutex_unlock(&hdev->vport_lock);
+
+ if (!(vport->overflow_promisc_flags & HNAE3_OVERFLOW_UPE))
+ dev_err(&hdev->pdev->dev, "UC MAC table full(%u)\n",
+ hdev->priv_umv_size);
+
+ return -ENOSPC;
+ }
+
+ /* check if we just hit the duplicate */
+ if (!ret)
+ return -EEXIST;
+
+ return ret;
+}
+
+static int hclge_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_UC,
+ addr);
+}
+
+int hclge_rm_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ int ret;
+
+ /* mac addr check */
+ if (is_zero_ether_addr(addr) ||
+ is_broadcast_ether_addr(addr) ||
+ is_multicast_ether_addr(addr)) {
+ dev_dbg(&hdev->pdev->dev, "Remove mac err! invalid mac:%pM.\n",
+ addr);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_bit(req.entry_type, HCLGE_MAC_VLAN_BIT0_EN_B, 0);
+ hclge_prepare_mac_addr(&req, addr, false);
+ ret = hclge_remove_mac_vlan_tbl(vport, &req);
+ if (!ret || ret == -ENOENT) {
+ mutex_lock(&hdev->vport_lock);
+ hclge_update_umv_space(vport, true);
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ return ret;
+}
+
+static int hclge_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD, HCLGE_MAC_ADDR_MC,
+ addr);
+}
+
+int hclge_add_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_desc desc[3];
+ int status;
+
+ /* mac addr check */
+ if (!is_multicast_ether_addr(addr)) {
+ dev_err(&hdev->pdev->dev,
+ "Add mc mac err! invalid mac:%pM.\n",
+ addr);
+ return -EINVAL;
+ }
+ memset(&req, 0, sizeof(req));
+ hclge_prepare_mac_addr(&req, addr, true);
+ status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
+ if (status) {
+ /* This mac addr does not exist, add a new entry for it */
+ memset(desc[0].data, 0, sizeof(desc[0].data));
+ memset(desc[1].data, 0, sizeof(desc[0].data));
+ memset(desc[2].data, 0, sizeof(desc[0].data));
+ }
+ status = hclge_update_desc_vfid(desc, vport->vport_id, false);
+ if (status)
+ return status;
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+
+ /* if the table has already overflowed, do not print every time */
+ if (status == -ENOSPC &&
+ !(vport->overflow_promisc_flags & HNAE3_OVERFLOW_MPE))
+ dev_err(&hdev->pdev->dev, "mc mac vlan table is full\n");
+
+ return status;
+}
+
+static int hclge_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL, HCLGE_MAC_ADDR_MC,
+ addr);
+}
+
+int hclge_rm_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc[3];
+
+ /* mac addr check */
+ if (!is_multicast_ether_addr(addr)) {
+ dev_dbg(&hdev->pdev->dev,
+ "Remove mc mac err! invalid mac:%pM.\n",
+ addr);
+ return -EINVAL;
+ }
+
+ memset(&req, 0, sizeof(req));
+ hclge_prepare_mac_addr(&req, addr, true);
+ status = hclge_lookup_mac_vlan_tbl(vport, &req, desc, true);
+ if (!status) {
+ /* This mac addr exists, remove this handle's VFID from it */
+ status = hclge_update_desc_vfid(desc, vport->vport_id, true);
+ if (status)
+ return status;
+
+ if (hclge_is_all_function_id_zero(desc))
+ /* All the vfids are zero, so delete this entry */
+ status = hclge_remove_mac_vlan_tbl(vport, &req);
+ else
+ /* Not all the vfids are zero, just update the vfid bitmap */
+ status = hclge_add_mac_vlan_tbl(vport, &req, desc);
+
+ } else if (status == -ENOENT) {
+ status = 0;
+ }
+
+ return status;
+}
+
+static void hclge_sync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*sync)(struct hclge_vport *,
+ const unsigned char *))
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = sync(vport, mac_node->mac_addr);
+ if (!ret) {
+ mac_node->state = HCLGE_MAC_ACTIVE;
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+
+ /* If one unicast mac address already exists in hardware,
+ * we still need to try the other unicast mac addresses,
+ * since they may be new addresses that can be added.
+ */
+ if (ret != -EEXIST)
+ break;
+ }
+ }
+}
+
+static void hclge_unsync_vport_mac_list(struct hclge_vport *vport,
+ struct list_head *list,
+ int (*unsync)(struct hclge_vport *,
+ const unsigned char *))
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = unsync(vport, mac_node->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ &vport->state);
+ break;
+ }
+ }
+}
+
+static bool hclge_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ bool all_added = true;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+ if (mac_node->state == HCLGE_MAC_TO_ADD)
+ all_added = false;
+
+ /* if the mac address from tmp_add_list is not in the
+ * uc/mc_mac_list, it means a TO_DEL request was received
+ * during the time window of adding the mac address into the mac
+ * table. If the mac_node state is ACTIVE, change it to TO_DEL so
+ * it will be removed next time; otherwise it must be TO_ADD,
+ * meaning this address has not been added into the mac table yet,
+ * so just remove the mac node.
+ */
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclge_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+
+ return all_added;
+}
+
+static void hclge_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclge_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ /* If the mac addr exists in the mac list, it means a
+ * new TO_ADD request was received during the time window
+ * of configuring the mac address. Since the mac node
+ * state is TO_ADD and the address is already in the
+ * hardware (because the delete failed), we just need
+ * to change the mac node state to ACTIVE.
+ */
+ new_node->state = HCLGE_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclge_update_overflow_flags(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ bool is_all_added)
+{
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_UPE;
+ else if (hclge_is_umv_space_full(vport, true))
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_UPE;
+ } else {
+ if (is_all_added)
+ vport->overflow_promisc_flags &= ~HNAE3_OVERFLOW_MPE;
+ else
+ vport->overflow_promisc_flags |= HNAE3_OVERFLOW_MPE;
+ }
+}
+
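+/* Rough flow of the table sync below, as read from the code:
+ * 1) under mac_list_lock, move TO_DEL nodes to tmp_del_list and copy
+ *    TO_ADD nodes to tmp_add_list;
+ * 2) outside the lock, issue the deletes first (to free table space),
+ *    then the adds;
+ * 3) re-take the lock and merge both temporary lists back into the
+ *    vport list, keeping failed entries so they are retried later;
+ * 4) update the overflow promisc flags depending on whether every
+ *    address was added.
+ */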
+static void hclge_sync_vport_mac_table(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+ bool all_added;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ /* move the mac addrs to the tmp_add_list and tmp_del_list, so
+ * we can add/delete these mac addrs outside the spin lock
+ */
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_uc_addr_common);
+ } else {
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+ hclge_sync_vport_mac_list(vport, &tmp_add_list,
+ hclge_add_mc_addr_common);
+ }
+
+ /* if adding/deleting some mac addresses failed, move them back to
+ * the mac_list and retry next time.
+ */
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+ all_added = hclge_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_update_overflow_flags(vport, mac_type, all_added);
+}
+
+static bool hclge_need_sync_mac_table(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ if (test_bit(vport->vport_id, hdev->vport_config_block))
+ return false;
+
+ if (test_and_clear_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state))
+ return true;
+
+ return false;
+}
+
+static void hclge_sync_mac_table(struct hclge_dev *hdev)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ if (!hclge_need_sync_mac_table(vport))
+ continue;
+
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_UC);
+ hclge_sync_vport_mac_table(vport, HCLGE_MAC_ADDR_MC);
+ }
+}
+
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ int (*unsync)(struct hclge_vport *vport, const unsigned char *addr);
+ struct hclge_mac_node *mac_cfg, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+ int ret;
+
+ if (mac_type == HCLGE_MAC_ADDR_UC) {
+ list = &vport->uc_mac_list;
+ unsync = hclge_rm_uc_addr_common;
+ } else {
+ list = &vport->mc_mac_list;
+ unsync = hclge_rm_mc_addr_common;
+ }
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ if (!is_del_list)
+ set_bit(vport->vport_id, hdev->vport_config_block);
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, list, node) {
+ switch (mac_cfg->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_del(&mac_cfg->node);
+ list_add_tail(&mac_cfg->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ if (is_del_list) {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_cfg, tmp, &tmp_del_list, node) {
+ ret = unsync(vport, mac_cfg->mac_addr);
+ if (!ret || ret == -ENOENT) {
+ /* clear all mac addrs from hardware, but keep these
+ * mac addrs in the mac list, and restore them after
+ * the vf reset is finished.
+ */
+ if (!is_del_list &&
+ mac_cfg->state == HCLGE_MAC_ACTIVE) {
+ mac_cfg->state = HCLGE_MAC_TO_ADD;
+ } else {
+ list_del(&mac_cfg->node);
+ kfree(mac_cfg);
+ }
+ } else if (is_del_list) {
+ mac_cfg->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_sync_from_del_list(&tmp_del_list, list);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+/* remove all mac addresses when uninitializing */
+static void hclge_uninit_vport_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ struct list_head tmp_del_list, *list;
+
+ INIT_LIST_HEAD(&tmp_del_list);
+
+ list = (mac_type == HCLGE_MAC_ADDR_UC) ?
+ &vport->uc_mac_list : &vport->mc_mac_list;
+
+ spin_lock_bh(&vport->mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGE_MAC_TO_DEL:
+ case HCLGE_MAC_ACTIVE:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGE_MAC_TO_ADD:
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ break;
+ }
+ }
+
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (mac_type == HCLGE_MAC_ADDR_UC)
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_uc_addr_common);
+ else
+ hclge_unsync_vport_mac_list(vport, &tmp_del_list,
+ hclge_rm_mc_addr_common);
+
+ if (!list_empty(&tmp_del_list))
+ dev_warn(&hdev->pdev->dev,
+ "uninit %s mac list for vport %u not completely.\n",
+ mac_type == HCLGE_MAC_ADDR_UC ? "uc" : "mc",
+ vport->vport_id);
+
+ list_for_each_entry_safe(mac_node, tmp, &tmp_del_list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclge_uninit_mac_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_UC);
+ hclge_uninit_vport_mac_list(vport, HCLGE_MAC_ADDR_MC);
+ }
+}
+
+static int hclge_get_mac_ethertype_cmd_status(struct hclge_dev *hdev,
+ u16 cmdq_resp, u8 resp_code)
+{
+#define HCLGE_ETHERTYPE_SUCCESS_ADD 0
+#define HCLGE_ETHERTYPE_ALREADY_ADD 1
+#define HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW 2
+#define HCLGE_ETHERTYPE_KEY_CONFLICT 3
+
+ int return_status;
+
+ if (cmdq_resp) {
+ dev_err(&hdev->pdev->dev,
+ "cmdq execute failed for get_mac_ethertype_cmd_status, status=%u.\n",
+ cmdq_resp);
+ return -EIO;
+ }
+
+ switch (resp_code) {
+ case HCLGE_ETHERTYPE_SUCCESS_ADD:
+ case HCLGE_ETHERTYPE_ALREADY_ADD:
+ return_status = 0;
+ break;
+ case HCLGE_ETHERTYPE_MGR_TBL_OVERFLOW:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for manager table overflow.\n");
+ return_status = -EIO;
+ break;
+ case HCLGE_ETHERTYPE_KEY_CONFLICT:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for key conflict.\n");
+ return_status = -EIO;
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for undefined, code=%u.\n",
+ resp_code);
+ return_status = -EIO;
+ }
+
+ return return_status;
+}
+
+static bool hclge_check_vf_mac_exist(struct hclge_vport *vport, int vf_idx,
+ u8 *mac_addr)
+{
+ struct hclge_mac_vlan_tbl_entry_cmd req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 egress_port = 0;
+ int i;
+
+ if (is_zero_ether_addr(mac_addr))
+ return false;
+
+ memset(&req, 0, sizeof(req));
+ hnae3_set_field(egress_port, HCLGE_MAC_EPORT_VFID_M,
+ HCLGE_MAC_EPORT_VFID_S, vport->vport_id);
+ req.egress_port = cpu_to_le16(egress_port);
+ hclge_prepare_mac_addr(&req, mac_addr, false);
+
+ if (hclge_lookup_mac_vlan_tbl(vport, &req, &desc, false) != -ENOENT)
+ return true;
+
+ vf_idx += HCLGE_VF_VPORT_START_NUM;
+ for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++)
+ if (i != vf_idx &&
+ ether_addr_equal(mac_addr, hdev->vport[i].vf_info.mac))
+ return true;
+
+ return false;
+}
+
+static int hclge_set_vf_mac(struct hnae3_handle *handle, int vf,
+ u8 *mac_addr)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (ether_addr_equal(mac_addr, vport->vf_info.mac)) {
+ dev_info(&hdev->pdev->dev,
+ "Specified MAC(=%pM) is same as before, no change committed!\n",
+ mac_addr);
+ return 0;
+ }
+
+ if (hclge_check_vf_mac_exist(vport, vf, mac_addr)) {
+ dev_err(&hdev->pdev->dev, "Specified MAC(=%pM) exists!\n",
+ mac_addr);
+ return -EEXIST;
+ }
+
+ ether_addr_copy(vport->vf_info.mac, mac_addr);
+
+ if (test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ dev_info(&hdev->pdev->dev,
+ "MAC of VF %d has been set to %pM, and it will be reinitialized!\n",
+ vf, mac_addr);
+ return hclge_inform_reset_assert_to_vf(vport);
+ }
+
+ dev_info(&hdev->pdev->dev, "MAC of VF %d has been set to %pM\n",
+ vf, mac_addr);
+ return 0;
+}
+
+static int hclge_add_mgr_tbl(struct hclge_dev *hdev,
+ const struct hclge_mac_mgr_tbl_entry_cmd *req)
+{
+ struct hclge_desc desc;
+ u8 resp_code;
+ u16 retval;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_ETHTYPE_ADD, false);
+ memcpy(desc.data, req, sizeof(struct hclge_mac_mgr_tbl_entry_cmd));
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed for cmd_send, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ resp_code = (le32_to_cpu(desc.data[0]) >> 8) & 0xff;
+ retval = le16_to_cpu(desc.retval);
+
+ return hclge_get_mac_ethertype_cmd_status(hdev, retval, resp_code);
+}
+
+static int init_mgr_tbl(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(hclge_mgr_table); i++) {
+ ret = hclge_add_mgr_tbl(hdev, &hclge_mgr_table[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "add mac ethertype failed, ret =%d.\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclge_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr)
+{
+ struct list_head *list = &vport->uc_mac_list;
+ struct hclge_mac_node *old_node, *new_node;
+
+ new_node = hclge_find_mac_node(list, new_addr);
+ if (!new_node) {
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->state = HCLGE_MAC_TO_ADD;
+ ether_addr_copy(new_node->mac_addr, new_addr);
+ list_add(&new_node->node, list);
+ } else {
+ if (new_node->state == HCLGE_MAC_TO_DEL)
+ new_node->state = HCLGE_MAC_ACTIVE;
+
+ /* make sure the new addr is at the list head, to avoid the dev
+ * addr not being re-added into the mac table due to the umv space
+ * limitation after a global/imp reset, which clears the mac
+ * table in hardware.
+ */
+ list_move(&new_node->node, list);
+ }
+
+ if (old_addr && !ether_addr_equal(old_addr, new_addr)) {
+ old_node = hclge_find_mac_node(list, old_addr);
+ if (old_node) {
+ if (old_node->state == HCLGE_MAC_TO_ADD) {
+ list_del(&old_node->node);
+ kfree(old_node);
+ } else {
+ old_node->state = HCLGE_MAC_TO_DEL;
+ }
+ }
+ }
+
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ return 0;
+}
+
+static int hclge_set_mac_addr(struct hnae3_handle *handle, void *p,
+ bool is_first)
+{
+ const unsigned char *new_addr = (const unsigned char *)p;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned char *old_addr = NULL;
+ int ret;
+
+ /* mac addr check */
+ if (is_zero_ether_addr(new_addr) ||
+ is_broadcast_ether_addr(new_addr) ||
+ is_multicast_ether_addr(new_addr)) {
+ dev_err(&hdev->pdev->dev,
+ "change uc mac err! invalid mac: %pM.\n",
+ new_addr);
+ return -EINVAL;
+ }
+
+ ret = hclge_pause_addr_cfg(hdev, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac pause address, ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ if (!is_first)
+ old_addr = hdev->hw.mac.mac_addr;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ ret = hclge_update_mac_node_for_dev_addr(vport, old_addr, new_addr);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to change the mac addr:%pM, ret = %d\n",
+ new_addr, ret);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ if (!is_first)
+ hclge_pause_addr_cfg(hdev, old_addr);
+
+ return ret;
+ }
+ /* we must update the dev addr under the spin lock, to prevent the dev
+ * addr from being removed by the set_rx_mode path.
+ */
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+
+ hclge_task_schedule(hdev, 0);
+
+ return 0;
+}
+
+static int hclge_do_ioctl(struct hnae3_handle *handle, struct ifreq *ifr,
+ int cmd)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (!hdev->hw.mac.phydev)
+ return -EOPNOTSUPP;
+
+ return phy_mii_ioctl(hdev->hw.mac.phydev, ifr, cmd);
+}
+
+static int hclge_set_vlan_filter_ctrl(struct hclge_dev *hdev, u8 vlan_type,
+ u8 fe_type, bool filter_en, u8 vf_id)
+{
+ struct hclge_vlan_filter_ctrl_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ /* read current vlan filter parameter */
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_CTRL, true);
+ req = (struct hclge_vlan_filter_ctrl_cmd *)desc.data;
+ req->vlan_type = vlan_type;
+ req->vf_id = vf_id;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get vlan filter config, ret = %d.\n", ret);
+ return ret;
+ }
+
+ /* modify and write new config parameter */
+ hclge_cmd_reuse_desc(&desc, false);
+ req->vlan_fe = filter_en ?
+ (req->vlan_fe | fe_type) : (req->vlan_fe & ~fe_type);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed to set vlan filter, ret = %d.\n",
+ ret);
+
+ return ret;
+}
+
+#define HCLGE_FILTER_TYPE_VF 0
+#define HCLGE_FILTER_TYPE_PORT 1
+#define HCLGE_FILTER_FE_EGRESS_V1_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_INGRESS_B BIT(0)
+#define HCLGE_FILTER_FE_NIC_EGRESS_B BIT(1)
+#define HCLGE_FILTER_FE_ROCE_INGRESS_B BIT(2)
+#define HCLGE_FILTER_FE_ROCE_EGRESS_B BIT(3)
+#define HCLGE_FILTER_FE_EGRESS (HCLGE_FILTER_FE_NIC_EGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_EGRESS_B)
+#define HCLGE_FILTER_FE_INGRESS (HCLGE_FILTER_FE_NIC_INGRESS_B \
+ | HCLGE_FILTER_FE_ROCE_INGRESS_B)
+
+static void hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS, enable, 0);
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, enable, 0);
+ } else {
+ hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B, enable,
+ 0);
+ }
+ if (enable)
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+ else
+ handle->netdev_flags &= ~HNAE3_VLAN_FLTR;
+}
+
+static int hclge_set_vf_vlan_common(struct hclge_dev *hdev, u16 vfid,
+ bool is_kill, u16 vlan,
+ __be16 proto)
+{
+ struct hclge_vport *vport = &hdev->vport[vfid];
+ struct hclge_vlan_filter_vf_cfg_cmd *req0;
+ struct hclge_vlan_filter_vf_cfg_cmd *req1;
+ struct hclge_desc desc[2];
+ u8 vf_byte_val;
+ u8 vf_byte_off;
+ int ret;
+
+ /* if the vf vlan table is full, firmware will disable the vf vlan
+ * filter, so it is neither possible nor necessary to add a new vlan id
+ * to it. If spoof check is enabled and the vf vlan table is full, a new
+ * vlan should not be added, because tx packets with these vlan ids
+ * would be dropped.
+ */
+ if (test_bit(vfid, hdev->vf_vlan_full) && !is_kill) {
+ if (vport->vf_info.spoofchk && vlan) {
+ dev_err(&hdev->pdev->dev,
+ "Can't add vlan due to spoof check is on and vf vlan table is full\n");
+ return -EPERM;
+ }
+ return 0;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc[0],
+ HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
+ hclge_cmd_setup_basic_desc(&desc[1],
+ HCLGE_OPC_VLAN_FILTER_VF_CFG, false);
+
+ desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+
+ vf_byte_off = vfid / 8;
+ vf_byte_val = 1 << (vfid % 8);
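+ /* Illustrative example of the bitmap addressing above: for vfid = 21,
+ * vf_byte_off = 21 / 8 = 2 and vf_byte_val = 1 << (21 % 8) = 0x20,
+ * i.e. bit 5 of byte 2. Bytes at or beyond HCLGE_MAX_VF_BYTES fall
+ * into the second descriptor (req1) below.
+ */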
+
+ req0 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[0].data;
+ req1 = (struct hclge_vlan_filter_vf_cfg_cmd *)desc[1].data;
+
+ req0->vlan_id = cpu_to_le16(vlan);
+ req0->vlan_cfg = is_kill;
+
+ if (vf_byte_off < HCLGE_MAX_VF_BYTES)
+ req0->vf_bitmap[vf_byte_off] = vf_byte_val;
+ else
+ req1->vf_bitmap[vf_byte_off - HCLGE_MAX_VF_BYTES] = vf_byte_val;
+
+ ret = hclge_cmd_send(&hdev->hw, desc, 2);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Send vf vlan command fail, ret =%d.\n",
+ ret);
+ return ret;
+ }
+
+ if (!is_kill) {
+#define HCLGE_VF_VLAN_NO_ENTRY 2
+ if (!req0->resp_code || req0->resp_code == 1)
+ return 0;
+
+ if (req0->resp_code == HCLGE_VF_VLAN_NO_ENTRY) {
+ set_bit(vfid, hdev->vf_vlan_full);
+ dev_warn(&hdev->pdev->dev,
+ "vf vlan table is full, vf vlan filter is disabled\n");
+ return 0;
+ }
+
+ dev_err(&hdev->pdev->dev,
+ "Add vf vlan filter fail, ret =%u.\n",
+ req0->resp_code);
+ } else {
+#define HCLGE_VF_VLAN_DEL_NO_FOUND 1
+ if (!req0->resp_code)
+ return 0;
+
+ /* the vf vlan filter is disabled when the vf vlan table is full,
+ * so new vlan ids are not added into the vf vlan table.
+ * Just return 0 without a warning, to avoid massive verbose
+ * logs on unload.
+ */
+ if (req0->resp_code == HCLGE_VF_VLAN_DEL_NO_FOUND)
+ return 0;
+
+ dev_err(&hdev->pdev->dev,
+ "Kill vf vlan filter fail, ret =%u.\n",
+ req0->resp_code);
+ }
+
+ return -EIO;
+}
+
+static int hclge_set_port_vlan_filter(struct hclge_dev *hdev, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vlan_filter_pf_cfg_cmd *req;
+ struct hclge_desc desc;
+ u8 vlan_offset_byte_val;
+ u8 vlan_offset_byte;
+ u8 vlan_offset_160;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_FILTER_PF_CFG, false);
+
+ vlan_offset_160 = vlan_id / HCLGE_VLAN_ID_OFFSET_STEP;
+ vlan_offset_byte = (vlan_id % HCLGE_VLAN_ID_OFFSET_STEP) /
+ HCLGE_VLAN_BYTE_SIZE;
+ vlan_offset_byte_val = 1 << (vlan_id % HCLGE_VLAN_BYTE_SIZE);
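+ /* Worked example, assuming HCLGE_VLAN_ID_OFFSET_STEP is 160 and
+ * HCLGE_VLAN_BYTE_SIZE is 8 as the names suggest: vlan_id = 1000
+ * gives vlan_offset_160 = 1000 / 160 = 6, vlan_offset_byte =
+ * (1000 % 160) / 8 = 5, and vlan_offset_byte_val = 1 << (1000 % 8)
+ * = 0x01.
+ */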
+
+ req = (struct hclge_vlan_filter_pf_cfg_cmd *)desc.data;
+ req->vlan_offset = vlan_offset_160;
+ req->vlan_cfg = is_kill;
+ req->vlan_offset_bitmap[vlan_offset_byte] = vlan_offset_byte_val;
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "port vlan command, send fail, ret =%d.\n", ret);
+ return ret;
+}
+
+static int hclge_set_vlan_filter_hw(struct hclge_dev *hdev, __be16 proto,
+ u16 vport_id, u16 vlan_id,
+ bool is_kill)
+{
+ u16 vport_idx, vport_num = 0;
+ int ret;
+
+ if (is_kill && !vlan_id)
+ return 0;
+
+ ret = hclge_set_vf_vlan_common(hdev, vport_id, is_kill, vlan_id,
+ proto);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set %u vport vlan filter config fail, ret =%d.\n",
+ vport_id, ret);
+ return ret;
+ }
+
+ /* vlan 0 may be added twice when 8021q module is enabled */
+ if (!is_kill && !vlan_id &&
+ test_bit(vport_id, hdev->vlan_table[vlan_id]))
+ return 0;
+
+ if (!is_kill && test_and_set_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_err(&hdev->pdev->dev,
+ "Add port vlan failed, vport %u is already in vlan %u\n",
+ vport_id, vlan_id);
+ return -EINVAL;
+ }
+
+ if (is_kill &&
+ !test_and_clear_bit(vport_id, hdev->vlan_table[vlan_id])) {
+ dev_err(&hdev->pdev->dev,
+ "Delete port vlan failed, vport %u is not in vlan %u\n",
+ vport_id, vlan_id);
+ return -EINVAL;
+ }
+
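+ /* The port-level filter below is only touched at the edges: when the
+ * first vport joins this vlan (vport_num is 1 after the add) or when
+ * the last vport leaves it (vport_num drops to 0 after the kill).
+ */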
+ for_each_set_bit(vport_idx, hdev->vlan_table[vlan_id], HCLGE_VPORT_NUM)
+ vport_num++;
+
+ if ((is_kill && vport_num == 0) || (!is_kill && vport_num == 1))
+ ret = hclge_set_port_vlan_filter(hdev, proto, vlan_id,
+ is_kill);
+
+ return ret;
+}
+
+static int hclge_set_vlan_tx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_tx_vtag_cfg *vcfg = &vport->txvlan_cfg;
+ struct hclge_vport_vtag_tx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_TX_CFG, false);
+
+ req = (struct hclge_vport_vtag_tx_cfg_cmd *)desc.data;
+ req->def_vlan_tag1 = cpu_to_le16(vcfg->default_tag1);
+ req->def_vlan_tag2 = cpu_to_le16(vcfg->default_tag2);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG1_B,
+ vcfg->accept_tag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG1_B,
+ vcfg->accept_untag1 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_TAG2_B,
+ vcfg->accept_tag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_ACCEPT_UNTAG2_B,
+ vcfg->accept_untag2 ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG1_EN_B,
+ vcfg->insert_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_PORT_INS_TAG2_EN_B,
+ vcfg->insert_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_CFG_NIC_ROCE_SEL_B, 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
+ HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] =
+ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
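+ /* A short example of the addressing above, assuming HCLGE_VF_NUM_PER_CMD
+ * is 64 and HCLGE_VF_NUM_PER_BYTE is 8 as the names suggest: vport_id = 70
+ * selects vf_offset = 1, bmap_index = (70 % 64) / 8 = 0 and bit 70 % 8 = 6,
+ * i.e. vf_bitmap[0] = 0x40.
+ */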
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port txvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_set_vlan_rx_offload_cfg(struct hclge_vport *vport)
+{
+ struct hclge_rx_vtag_cfg *vcfg = &vport->rxvlan_cfg;
+ struct hclge_vport_vtag_rx_cfg_cmd *req;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u16 bmap_index;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_VLAN_PORT_RX_CFG, false);
+
+ req = (struct hclge_vport_vtag_rx_cfg_cmd *)desc.data;
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG1_EN_B,
+ vcfg->strip_tag1_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_REM_TAG2_EN_B,
+ vcfg->strip_tag2_en ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG1_EN_B,
+ vcfg->vlan1_vlan_prionly ? 1 : 0);
+ hnae3_set_bit(req->vport_vlan_cfg, HCLGE_SHOW_TAG2_EN_B,
+ vcfg->vlan2_vlan_prionly ? 1 : 0);
+
+ req->vf_offset = vport->vport_id / HCLGE_VF_NUM_PER_CMD;
+ bmap_index = vport->vport_id % HCLGE_VF_NUM_PER_CMD /
+ HCLGE_VF_NUM_PER_BYTE;
+ req->vf_bitmap[bmap_index] =
+ 1U << (vport->vport_id % HCLGE_VF_NUM_PER_BYTE);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send port rxvlan cfg command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_vlan_offload_cfg(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ u16 vlan_tag)
+{
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->txvlan_cfg.accept_tag1 = true;
+ vport->txvlan_cfg.insert_tag1_en = false;
+ vport->txvlan_cfg.default_tag1 = 0;
+ } else {
+ vport->txvlan_cfg.accept_tag1 = false;
+ vport->txvlan_cfg.insert_tag1_en = true;
+ vport->txvlan_cfg.default_tag1 = vlan_tag;
+ }
+
+ vport->txvlan_cfg.accept_untag1 = true;
+
+ /* accept_tag2 and accept_untag2 are not supported on
+ * pdev revision(0x20); newer revisions support them, but
+ * these two fields cannot be configured by the user.
+ */
+ vport->txvlan_cfg.accept_tag2 = true;
+ vport->txvlan_cfg.accept_untag2 = true;
+ vport->txvlan_cfg.insert_tag2_en = false;
+ vport->txvlan_cfg.default_tag2 = 0;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en =
+ vport->rxvlan_cfg.rx_vlan_offload_en;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ }
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+
+ ret = hclge_set_vlan_tx_offload_cfg(vport);
+ if (ret)
+ return ret;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
+static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
+{
+ struct hclge_rx_vlan_type_cfg_cmd *rx_req;
+ struct hclge_tx_vlan_type_cfg_cmd *tx_req;
+ struct hclge_desc desc;
+ int status;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_TYPE_ID, false);
+ rx_req = (struct hclge_rx_vlan_type_cfg_cmd *)desc.data;
+ rx_req->ot_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_fst_vlan_type);
+ rx_req->ot_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_ot_sec_vlan_type);
+ rx_req->in_fst_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_fst_vlan_type);
+ rx_req->in_sec_vlan_type =
+ cpu_to_le16(hdev->vlan_type_cfg.rx_in_sec_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Send rxvlan protocol type command fail, ret =%d\n",
+ status);
+ return status;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MAC_VLAN_INSERT, false);
+
+ tx_req = (struct hclge_tx_vlan_type_cfg_cmd *)desc.data;
+ tx_req->ot_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_ot_vlan_type);
+ tx_req->in_vlan_type = cpu_to_le16(hdev->vlan_type_cfg.tx_in_vlan_type);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "Send txvlan protocol type command fail, ret =%d\n",
+ status);
+
+ return status;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+#define HCLGE_DEF_VLAN_TYPE 0x8100
+
+ struct hnae3_handle *handle = &hdev->vport[0].nic;
+ struct hclge_vport *vport;
+ int ret;
+ int i;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ /* for revision 0x21, vf vlan filter is per function */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ ret = hclge_set_vlan_filter_ctrl(hdev,
+ HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS,
+ true,
+ vport->vport_id);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+ HCLGE_FILTER_FE_INGRESS, true,
+ 0);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_EGRESS_V1_B,
+ true, 0);
+ if (ret)
+ return ret;
+ }
+
+ handle->netdev_flags |= HNAE3_VLAN_FLTR;
+
+ hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
+ hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+
+ ret = hclge_set_vlan_protocol_type(hdev);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ u16 vlan_tag;
+
+ vport = &hdev->vport[i];
+ vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+
+ ret = hclge_vlan_offload_cfg(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan_tag);
+ if (ret)
+ return ret;
+ }
+
+ return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
+}
+
+static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool writen_to_tbl)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node)
+ if (vlan->vlan_id == vlan_id)
+ return;
+
+ vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
+ if (!vlan)
+ return;
+
+ vlan->hd_tbl_status = writen_to_tbl;
+ vlan->vlan_id = vlan_id;
+
+ list_add_tail(&vlan->node, &vport->vlan_list);
+}
+
+static int hclge_add_vport_all_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (!vlan->hd_tbl_status) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "restore vport vlan list failed, ret=%d\n",
+ ret);
+ return ret;
+ }
+ }
+ vlan->hd_tbl_status = true;
+ }
+
+ return 0;
+}
+
+static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
+ bool is_write_tbl)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->vlan_id == vlan_id) {
+ if (is_write_tbl && vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_id,
+ true);
+
+ list_del(&vlan->node);
+ kfree(vlan);
+ break;
+ }
+ }
+}
+
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ if (vlan->hd_tbl_status)
+ hclge_set_vlan_filter_hw(hdev,
+ htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id,
+ true);
+
+ vlan->hd_tbl_status = false;
+ if (is_del_list) {
+ list_del(&vlan->node);
+ kfree(vlan);
+ }
+ }
+ clear_bit(vport->vport_id, hdev->vf_vlan_full);
+}
+
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ vport = &hdev->vport[i];
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ list_del(&vlan->node);
+ kfree(vlan);
+ }
+ }
+}
+
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport)
+{
+ struct hclge_vport_vlan_cfg *vlan, *tmp;
+ struct hclge_dev *hdev = vport->back;
+ u16 vlan_proto;
+ u16 vlan_id;
+ u16 state;
+ int ret;
+
+ vlan_proto = vport->port_base_vlan_cfg.vlan_info.vlan_proto;
+ vlan_id = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
+ state = vport->port_base_vlan_cfg.state;
+
+ if (state != HNAE3_PORT_BASE_VLAN_DISABLE) {
+ clear_bit(vport->vport_id, hdev->vlan_table[vlan_id]);
+ hclge_set_vlan_filter_hw(hdev, htons(vlan_proto),
+ vport->vport_id, vlan_id,
+ false);
+ return;
+ }
+
+ list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan->vlan_id, false);
+ if (ret)
+ break;
+ vlan->hd_tbl_status = true;
+ }
+}
+
+/* For global reset and imp reset, hardware will clear the mac table,
+ * so we change the mac address state from ACTIVE to TO_ADD; then they
+ * can be restored in the service task after the reset completes. Further,
+ * the mac addresses with state TO_DEL or DEL_FAIL do not need to be
+ * restored after the reset, so just remove these mac nodes from mac_list.
+ */
+static void hclge_mac_node_convert_for_reset(struct list_head *list)
+{
+ struct hclge_mac_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ if (mac_node->state == HCLGE_MAC_ACTIVE) {
+ mac_node->state = HCLGE_MAC_TO_ADD;
+ } else if (mac_node->state == HCLGE_MAC_TO_DEL) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+void hclge_restore_mac_table_common(struct hclge_vport *vport)
+{
+ spin_lock_bh(&vport->mac_list_lock);
+
+ hclge_mac_node_convert_for_reset(&vport->uc_mac_list);
+ hclge_mac_node_convert_for_reset(&vport->mc_mac_list);
+ set_bit(HCLGE_VPORT_STATE_MAC_TBL_CHANGE, &vport->state);
+
+ spin_unlock_bh(&vport->mac_list_lock);
+}
+
+static void hclge_restore_hw_table(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+
+ hclge_restore_mac_table_common(vport);
+ hclge_restore_vport_vlan_table(vport);
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+
+ hclge_restore_fd_entries(handle);
+}
+
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ if (vport->port_base_vlan_cfg.state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ vport->rxvlan_cfg.strip_tag1_en = false;
+ vport->rxvlan_cfg.strip_tag2_en = enable;
+ } else {
+ vport->rxvlan_cfg.strip_tag1_en = enable;
+ vport->rxvlan_cfg.strip_tag2_en = true;
+ }
+ vport->rxvlan_cfg.vlan1_vlan_prionly = false;
+ vport->rxvlan_cfg.vlan2_vlan_prionly = false;
+ vport->rxvlan_cfg.rx_vlan_offload_en = enable;
+
+ return hclge_set_vlan_rx_offload_cfg(vport);
+}
+
+static int hclge_update_vlan_filter_entries(struct hclge_vport *vport,
+ u16 port_base_vlan_state,
+ struct hclge_vlan_info *new_info,
+ struct hclge_vlan_info *old_info)
+{
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ if (port_base_vlan_state == HNAE3_PORT_BASE_VLAN_ENABLE) {
+ hclge_rm_vport_all_vlan_table(vport, false);
+ return hclge_set_vlan_filter_hw(hdev,
+ htons(new_info->vlan_proto),
+ vport->vport_id,
+ new_info->vlan_tag,
+ false);
+ }
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(old_info->vlan_proto),
+ vport->vport_id, old_info->vlan_tag,
+ true);
+ if (ret)
+ return ret;
+
+ return hclge_add_vport_all_vlan_table(vport);
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info)
+{
+ struct hnae3_handle *nic = &vport->nic;
+ struct hclge_vlan_info *old_vlan_info;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag);
+ if (ret)
+ return ret;
+
+ if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
+ /* add new VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(vlan_info->vlan_proto),
+ vport->vport_id,
+ vlan_info->vlan_tag,
+ false);
+ if (ret)
+ return ret;
+
+ /* remove old VLAN tag */
+ ret = hclge_set_vlan_filter_hw(hdev,
+ htons(old_vlan_info->vlan_proto),
+ vport->vport_id,
+ old_vlan_info->vlan_tag,
+ true);
+ if (ret)
+ return ret;
+
+ goto update;
+ }
+
+ ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+ old_vlan_info);
+ if (ret)
+ return ret;
+
+ /* update state only when disabling/enabling port based VLAN */
+ vport->port_base_vlan_cfg.state = state;
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+
+update:
+ vport->port_base_vlan_cfg.vlan_info.vlan_tag = vlan_info->vlan_tag;
+ vport->port_base_vlan_cfg.vlan_info.qos = vlan_info->qos;
+ vport->port_base_vlan_cfg.vlan_info.vlan_proto = vlan_info->vlan_proto;
+
+ return 0;
+}
+
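+/* Decision table implemented by hclge_get_port_base_vlan_state() below,
+ * as read from the code:
+ *   currently DISABLED, requested vlan == 0  -> NOCHANGE
+ *   currently DISABLED, requested vlan != 0  -> ENABLE
+ *   currently enabled,  requested vlan == 0  -> DISABLE
+ *   currently enabled,  same vlan tag        -> NOCHANGE
+ *   currently enabled,  different vlan tag   -> MODIFY
+ */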
+static u16 hclge_get_port_base_vlan_state(struct hclge_vport *vport,
+ enum hnae3_port_base_vlan_state state,
+ u16 vlan)
+{
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ if (!vlan)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+ else
+ return HNAE3_PORT_BASE_VLAN_ENABLE;
+ } else {
+ if (!vlan)
+ return HNAE3_PORT_BASE_VLAN_DISABLE;
+ else if (vport->port_base_vlan_cfg.vlan_info.vlan_tag == vlan)
+ return HNAE3_PORT_BASE_VLAN_NOCHANGE;
+ else
+ return HNAE3_PORT_BASE_VLAN_MODIFY;
+ }
+}
+
+static int hclge_set_vf_vlan_filter(struct hnae3_handle *handle, int vfid,
+ u16 vlan, u8 qos, __be16 proto)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vlan_info vlan_info;
+ u16 state;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vfid);
+ if (!vport)
+ return -EINVAL;
+
+ /* qos is a 3-bit value, so it cannot be bigger than 7 */
+ if (vlan > VLAN_N_VID - 1 || qos > 7)
+ return -EINVAL;
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+ state = hclge_get_port_base_vlan_state(vport,
+ vport->port_base_vlan_cfg.state,
+ vlan);
+ if (state == HNAE3_PORT_BASE_VLAN_NOCHANGE)
+ return 0;
+
+ vlan_info.vlan_tag = vlan;
+ vlan_info.qos = qos;
+ vlan_info.vlan_proto = ntohs(proto);
+
+ if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) {
+ return hclge_update_port_base_vlan_cfg(vport, state,
+ &vlan_info);
+ } else {
+ ret = hclge_push_vf_port_base_vlan_info(&hdev->vport[0],
+ vport->vport_id, state,
+ vlan, qos,
+ ntohs(proto));
+ return ret;
+ }
+}
+
+static void hclge_clear_vf_vlan(struct hclge_dev *hdev)
+{
+ struct hclge_vlan_info *vlan_info;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* clear port based vlan for all vfs */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ vport = &hdev->vport[vf];
+ vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id,
+ vlan_info->vlan_tag, true);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to clear vf vlan for vf%d, ret = %d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ bool writen_to_tbl = false;
+ int ret = 0;
+
+ /* When the device is resetting or the reset has failed, firmware is
+ * unable to handle the mailbox. Just record the vlan id, and remove it
+ * after the reset is finished.
+ */
+ if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
+ /* when port based vlan is enabled, we use the port based vlan as the
+ * vlan filter entry. In this case, we don't update the vlan filter
+ * table when the user adds a new vlan or removes an existing one; we
+ * just update the vport vlan list. The vlan ids in the vlan list are
+ * not written into the vlan filter table until port based vlan is
+ * disabled.
+ */
+ if (handle->port_base_vlan_state == HNAE3_PORT_BASE_VLAN_DISABLE) {
+ ret = hclge_set_vlan_filter_hw(hdev, proto, vport->vport_id,
+ vlan_id, is_kill);
+ writen_to_tbl = true;
+ }
+
+ if (!ret) {
+ if (!is_kill)
+ hclge_add_vport_vlan_table(vport, vlan_id,
+ writen_to_tbl);
+ else if (is_kill && vlan_id != 0)
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+ } else if (is_kill) {
+ /* when removing the hw vlan filter failed, record the vlan id,
+ * and try to remove it from hw later, to stay consistent
+ * with the stack
+ */
+ set_bit(vlan_id, vport->vlan_del_fail_bmap);
+ }
+ return ret;
+}
+
+static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
+{
+#define HCLGE_MAX_SYNC_COUNT 60
+
+ int i, ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ /* start from vport 1 for PF is always alive */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+ vport->vport_id, vlan_id,
+ true);
+ if (ret && ret != -EINVAL)
+ return;
+
+ clear_bit(vlan_id, vport->vlan_del_fail_bmap);
+ hclge_rm_vport_vlan_table(vport, vlan_id, false);
+
+ sync_cnt++;
+ if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
+ VLAN_N_VID);
+ }
+ }
+}
+
+static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps)
+{
+ struct hclge_config_max_frm_size_cmd *req;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false);
+
+ req = (struct hclge_config_max_frm_size_cmd *)desc.data;
+ req->max_frm_size = cpu_to_le16(new_mps);
+ req->min_frm_size = HCLGE_MAC_MIN_FRAME;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+
+ return hclge_set_vport_mtu(vport, new_mtu);
+}
+
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu)
+{
+ struct hclge_dev *hdev = vport->back;
+ int i, max_frm_size, ret;
+
+ /* HW supports 2 layers of vlan */
+ max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+ if (max_frm_size < HCLGE_MAC_MIN_FRAME ||
+ max_frm_size > HCLGE_MAC_MAX_FRAME)
+ return -EINVAL;
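+ /* Worked example of the frame-size math above: for new_mtu = 1500,
+ * max_frm_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) +
+ * 2 * VLAN_HLEN (2 * 4) = 1526 bytes, which must fall within the
+ * min/max frame limits checked above.
+ */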
+
+ max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME);
+ mutex_lock(&hdev->vport_lock);
+ /* VF's mps must fit within hdev->mps */
+ if (vport->vport_id && max_frm_size > hdev->mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ } else if (vport->vport_id) {
+ vport->mps = max_frm_size;
+ mutex_unlock(&hdev->vport_lock);
+ return 0;
+ }
+
+ /* PF's mps must not be less than any VF's mps */
+ for (i = 1; i < hdev->num_alloc_vport; i++)
+ if (max_frm_size < hdev->vport[i].mps) {
+ mutex_unlock(&hdev->vport_lock);
+ return -EINVAL;
+ }
+
+ hclge_notify_client(hdev, HNAE3_DOWN_CLIENT);
+
+ ret = hclge_set_mac_mtu(hdev, max_frm_size);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Change mtu fail, ret =%d\n", ret);
+ goto out;
+ }
+
+ hdev->mps = max_frm_size;
+ vport->mps = max_frm_size;
+
+ ret = hclge_buffer_alloc(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Allocate buffer fail, ret =%d\n", ret);
+
+out:
+ hclge_notify_client(hdev, HNAE3_UP_CLIENT);
+ mutex_unlock(&hdev->vport_lock);
+ return ret;
+}
+
+static int hclge_send_reset_tqp_cmd(struct hclge_dev *hdev, u16 queue_id,
+ bool enable)
+{
+ struct hclge_reset_tqp_queue_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, false);
+
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
+ if (enable)
+ hnae3_set_bit(req->reset_req, HCLGE_TQP_RESET_B, 1U);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Send tqp reset cmd error, status =%d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id)
+{
+ struct hclge_reset_tqp_queue_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RESET_TQP_QUEUE, true);
+
+ req = (struct hclge_reset_tqp_queue_cmd *)desc.data;
+ req->tqp_id = cpu_to_le16(queue_id & HCLGE_RING_ID_MASK);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get reset status error, status =%d\n", ret);
+ return ret;
+ }
+
+ return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B);
+}
+
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hnae3_queue *queue;
+ struct hclge_tqp *tqp;
+
+ queue = handle->kinfo.tqp[queue_id];
+ tqp = container_of(queue, struct hclge_tqp, q);
+
+ return tqp->index;
+}
+
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int reset_try_times = 0;
+ int reset_status;
+ u16 queue_gid;
+ int ret;
+
+ queue_gid = hclge_covert_handle_qid_global(handle, queue_id);
+
+ ret = hclge_tqp_enable(hdev, queue_id, 0, false);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "Disable tqp fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Send reset tqp cmd fail, ret = %d\n", ret);
+ return ret;
+ }
+
+ while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+ reset_status = hclge_get_reset_status(hdev, queue_gid);
+ if (reset_status)
+ break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
+ }
+
+ if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev, "Reset TQP fail\n");
+ return ret;
+ }
+
+ ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Deassert the soft reset fail, ret = %d\n", ret);
+
+ return ret;
+}
+
+void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id)
+{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ int reset_try_times = 0;
+ int reset_status;
+ u16 queue_gid;
+ int ret;
+
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n",
+ queue_id);
+ return;
+ }
+
+ queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+
+ ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true);
+ if (ret) {
+ dev_warn(&hdev->pdev->dev,
+ "Send reset tqp cmd fail, ret = %d\n", ret);
+ return;
+ }
+
+ while (reset_try_times++ < HCLGE_TQP_RESET_TRY_TIMES) {
+ reset_status = hclge_get_reset_status(hdev, queue_gid);
+ if (reset_status)
+ break;
+
+ /* Wait for tqp hw reset */
+ usleep_range(1000, 1200);
+ }
+
+ if (reset_try_times >= HCLGE_TQP_RESET_TRY_TIMES) {
+ dev_warn(&hdev->pdev->dev, "Reset TQP fail\n");
+ return;
+ }
+
+ ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "Deassert the soft reset fail, ret = %d\n", ret);
+}
+
+static u32 hclge_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hdev->fw_version;
+}
+
+static void hclge_set_flowctrl_adv(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_set_asym_pause(phydev, rx_en, tx_en);
+}
+
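+/* Program MAC link-level pause (rx/tx) in hardware; this is a no-op while
+ * PFC is the active flow-control mode.
+ */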
+static int hclge_cfg_pauseparam(struct hclge_dev *hdev, u32 rx_en, u32 tx_en)
+{
+ int ret;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ return 0;
+
+ ret = hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "configure pauseparam error, ret = %d.\n", ret);
+
+ return ret;
+}
+
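+/* Resolve the pause configuration from the PHY autoneg result (local and
+ * link-partner advertisements) and program the MAC accordingly; pause is
+ * forced off for half duplex.
+ */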
+int hclge_cfg_flowctrl(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u16 remote_advertising = 0;
+ u16 local_advertising;
+ u32 rx_pause, tx_pause;
+ u8 flowctl;
+
+ if (!phydev->link || !phydev->autoneg)
+ return 0;
+
+ local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising);
+
+ if (phydev->pause)
+ remote_advertising = LPA_PAUSE_CAP;
+
+ if (phydev->asym_pause)
+ remote_advertising |= LPA_PAUSE_ASYM;
+
+ flowctl = mii_resolve_flowctrl_fdx(local_advertising,
+ remote_advertising);
+ tx_pause = flowctl & FLOW_CTRL_TX;
+ rx_pause = flowctl & FLOW_CTRL_RX;
+
+ if (phydev->duplex == HCLGE_MAC_HALF) {
+ tx_pause = 0;
+ rx_pause = 0;
+ }
+
+ return hclge_cfg_pauseparam(hdev, rx_pause, tx_pause);
+}
+
+static void hclge_get_pauseparam(struct hnae3_handle *handle, u32 *auto_neg,
+ u32 *rx_en, u32 *tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ *auto_neg = phydev ? hclge_get_autoneg(handle) : 0;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ *rx_en = 0;
+ *tx_en = 0;
+ return;
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_RX_PAUSE) {
+ *rx_en = 1;
+ *tx_en = 0;
+ } else if (hdev->tm_info.fc_mode == HCLGE_FC_TX_PAUSE) {
+ *tx_en = 1;
+ *rx_en = 0;
+ } else if (hdev->tm_info.fc_mode == HCLGE_FC_FULL) {
+ *rx_en = 1;
+ *tx_en = 1;
+ } else {
+ *rx_en = 0;
+ *tx_en = 0;
+ }
+}
+
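+/* Record the user-requested pause setting as both the current and the
+ * last-used flow-control mode.
+ */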
+static void hclge_record_user_pauseparam(struct hclge_dev *hdev,
+ u32 rx_en, u32 tx_en)
+{
+ if (rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_FULL;
+ else if (rx_en && !tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_RX_PAUSE;
+ else if (!rx_en && tx_en)
+ hdev->fc_mode_last_time = HCLGE_FC_TX_PAUSE;
+ else
+ hdev->fc_mode_last_time = HCLGE_FC_NONE;
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+}
+
+static int hclge_set_pauseparam(struct hnae3_handle *handle, u32 auto_neg,
+ u32 rx_en, u32 tx_en)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ u32 fc_autoneg;
+
+ if (phydev) {
+ fc_autoneg = hclge_get_autoneg(handle);
+ if (auto_neg != fc_autoneg) {
+ dev_info(&hdev->pdev->dev,
+ "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC) {
+ dev_info(&hdev->pdev->dev,
+ "Priority flow control enabled. Cannot set link flow control.\n");
+ return -EOPNOTSUPP;
+ }
+
+ hclge_set_flowctrl_adv(hdev, rx_en, tx_en);
+
+ hclge_record_user_pauseparam(hdev, rx_en, tx_en);
+
+ if (!auto_neg)
+ return hclge_cfg_pauseparam(hdev, rx_en, tx_en);
+
+ if (phydev)
+ return phy_start_aneg(phydev);
+
+ return -EOPNOTSUPP;
+}
+
+static void hclge_get_ksettings_an_result(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed, u8 *duplex)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ if (speed)
+ *speed = hdev->hw.mac.speed;
+ if (duplex)
+ *duplex = hdev->hw.mac.duplex;
+ if (auto_neg)
+ *auto_neg = hdev->hw.mac.autoneg;
+}
+
+static void hclge_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ /* When the nic is down, the service task is not running and does not
+ * update the port information every second. Query the port information
+ * before returning the media type to ensure it is up to date.
+ */
+ hclge_update_port_info(hdev);
+
+ if (media_type)
+ *media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
+}
+
+static void hclge_get_mdix_mode(struct hnae3_handle *handle,
+ u8 *tp_mdix_ctrl, u8 *tp_mdix)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ int mdix_ctrl, mdix, is_resolved;
+ unsigned int retval;
+
+ if (!phydev) {
+ *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ *tp_mdix = ETH_TP_MDI_INVALID;
+ return;
+ }
+
+ phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_MDIX);
+
+ retval = phy_read(phydev, HCLGE_PHY_CSC_REG);
+ mdix_ctrl = hnae3_get_field(retval, HCLGE_PHY_MDIX_CTRL_M,
+ HCLGE_PHY_MDIX_CTRL_S);
+
+ retval = phy_read(phydev, HCLGE_PHY_CSS_REG);
+ mdix = hnae3_get_bit(retval, HCLGE_PHY_MDIX_STATUS_B);
+ is_resolved = hnae3_get_bit(retval, HCLGE_PHY_SPEED_DUP_RESOLVE_B);
+
+ phy_write(phydev, HCLGE_PHY_PAGE_REG, HCLGE_PHY_PAGE_COPPER);
+
+ switch (mdix_ctrl) {
+ case 0x0:
+ *tp_mdix_ctrl = ETH_TP_MDI;
+ break;
+ case 0x1:
+ *tp_mdix_ctrl = ETH_TP_MDI_X;
+ break;
+ case 0x3:
+ *tp_mdix_ctrl = ETH_TP_MDI_AUTO;
+ break;
+ default:
+ *tp_mdix_ctrl = ETH_TP_MDI_INVALID;
+ break;
+ }
+
+ if (!is_resolved)
+ *tp_mdix = ETH_TP_MDI_INVALID;
+ else if (mdix)
+ *tp_mdix = ETH_TP_MDI_X;
+ else
+ *tp_mdix = ETH_TP_MDI;
+}
+
+static void hclge_info_show(struct hclge_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "PF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "Numbers of vmdq vports: %u\n", hdev->num_vmdq_vport);
+ dev_info(dev, "Numbers of VF for this PF: %u\n", hdev->num_req_vfs);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "Total buffer size for TX/RX: %u\n", hdev->pkt_buf_size);
+ dev_info(dev, "TX buffer size for each TC: %u\n", hdev->tx_buf_size);
+ dev_info(dev, "DV buffer size for each TC: %u\n", hdev->dv_buf_size);
+ dev_info(dev, "This is %s PF\n",
+ hdev->flag & HCLGE_FLAG_MAIN ? "main" : "not main");
+ dev_info(dev, "DCB %s\n",
+ hdev->flag & HCLGE_FLAG_DCB_ENABLE ? "enable" : "disable");
+ dev_info(dev, "MQPRIO %s\n",
+ hdev->flag & HCLGE_FLAG_MQPRIO_ENABLE ? "enable" : "disable");
+
+ dev_info(dev, "PF info end.\n");
+}
+
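+/* Initialize the NIC client instance on a vport. If a reset starts while the
+ * instance is being initialized, it is torn down again and -EBUSY is
+ * returned; otherwise NIC hw error interrupts are enabled.
+ */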
+static int hclge_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_client *client = vport->nic.client;
+ struct hclge_dev *hdev = ae_dev->priv;
+ int rst_cnt = hdev->rst_stats.reset_cnt;
+ int ret;
+
+ ret = client->ops->init_instance(&vport->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_nic_err;
+ }
+
+ /* Enable nic hw error interrupts */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable hw error interrupts\n", ret);
+ goto init_nic_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->vport->nic))
+ hclge_info_show(hdev);
+
+ return ret;
+
+init_nic_err:
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+
+ return ret;
+}
+
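+/* Initialize the RoCE client instance on a vport once both the NIC and RoCE
+ * clients are registered and the device supports RoCE. Rolls back and
+ * returns -EBUSY if a reset occurs during initialization.
+ */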
+static int hclge_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hnae3_client *client;
+ int rst_cnt;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ client = hdev->roce_client;
+ ret = hclge_init_roce_base_info(vport);
+ if (ret)
+ return ret;
+
+ rst_cnt = hdev->rst_stats.reset_cnt;
+ ret = client->ops->init_instance(&vport->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.reset_cnt) {
+ ret = -EBUSY;
+ goto init_roce_err;
+ }
+
+ /* Enable roce ras interrupts */
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&ae_dev->pdev->dev,
+ "fail(%d) to enable roce ras interrupts\n", ret);
+ goto init_roce_err;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+
+init_roce_err:
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce, 0);
+
+ return ret;
+}
+
+static int hclge_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i, ret;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ vport = &hdev->vport[i];
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ vport->nic.client = client;
+ ret = hclge_init_nic_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_nic;
+
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ case HNAE3_CLIENT_ROCE:
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ vport->roce.client = client;
+ }
+
+ ret = hclge_init_roce_client_instance(ae_dev, vport);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ return ret;
+}
+
+static void hclge_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_vport *vport;
+ int i;
+
+ for (i = 0; i < hdev->num_vmdq_vport + 1; i++) {
+ vport = &hdev->vport[i];
+ if (hdev->roce_client) {
+ clear_bit(HCLGE_STATE_ROCE_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ hdev->roce_client->ops->uninit_instance(&vport->roce,
+ 0);
+ hdev->roce_client = NULL;
+ vport->roce.client = NULL;
+ }
+ if (client->type == HNAE3_CLIENT_ROCE)
+ return;
+ if (hdev->nic_client && client->ops->uninit_instance) {
+ clear_bit(HCLGE_STATE_NIC_REGISTERED, &hdev->state);
+ while (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGE_WAIT_RESET_DONE);
+
+ client->ops->uninit_instance(&vport->nic, 0);
+ hdev->nic_client = NULL;
+ vport->nic.client = NULL;
+ }
+ }
+}
+
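+/* PCI bring-up: enable the device, set a 64-bit DMA mask (falling back to
+ * 32-bit), request the regions, map BAR 2 as the register space and record
+ * the number of VFs the device supports.
+ */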
+static int hclge_pci_init(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclge_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "can't set consistent PCI DMA");
+ goto err_disable_device;
+ }
+ dev_warn(&pdev->dev, "set DMA mask to 32 bits\n");
+ }
+
+ ret = pci_request_regions(pdev, HCLGE_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->io_base = pcim_iomap(pdev, 2, 0);
+ if (!hw->io_base) {
+ dev_err(&pdev->dev, "Can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_clr_master;
+ }
+
+ hdev->num_req_vfs = pci_sriov_get_totalvfs(pdev);
+
+ return 0;
+err_clr_master:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void hclge_pci_uninit(struct hclge_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pcim_iounmap(pdev, hdev->hw.io_base);
+ pci_free_irq_vectors(pdev);
+ pci_clear_master(pdev);
+ pci_release_mem_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static void hclge_state_init(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_SERVICE_INITED, &hdev->state);
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ clear_bit(HCLGE_STATE_RST_FAIL, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclge_state_uninit(struct hclge_dev *hdev)
+{
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+ set_bit(HCLGE_STATE_REMOVING, &hdev->state);
+
+ if (hdev->reset_timer.function)
+ del_timer_sync(&hdev->reset_timer);
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
+}
+
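+/* Prepare the device for an FLR: take the reset semaphore and run the reset
+ * prepare sequence, retrying while it fails or another reset is pending, then
+ * disable the misc vector and command queue until the FLR completes.
+ */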
+static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGE_FLR_RETRY_WAIT_MS 500
+#define HCLGE_FLR_RETRY_CNT 5
+
+ struct hclge_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
+
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclge_reset_prepare(hdev);
+ if (ret || hdev->reset_pending) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGE_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGE_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
+
+ /* disable misc vector before FLR done */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state);
+ hdev->rst_stats.flr_rst_cnt++;
+}
+
+static void hclge_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclge_reset_rebuild(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "fail to rebuild, ret=%d\n", ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGE_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static void hclge_clear_resetting_state(struct hclge_dev *hdev)
+{
+ u16 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ struct hclge_vport *vport = &hdev->vport[i];
+ int ret;
+
+ /* Send cmd to clear VF's FUNC_RST_ING */
+ ret = hclge_set_vf_rst(hdev, vport->vport_id, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "clear vf(%u) rst failed %d!\n",
+ vport->vport_id, ret);
+ }
+}
+
+static int hclge_clear_hw_resource(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ /* This new command is only supported by new firmware; older firmware
+ * fails it with -EOPNOTSUPP. To keep the code backward compatible,
+ * override that error and return success.
+ */
+ if (ret && ret != -EOPNOTSUPP) {
+ dev_err(&hdev->pdev->dev,
+ "failed to clear hw resource, ret = %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclge_dev *hdev;
+ int ret;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ hdev->reset_type = HNAE3_NONE_RESET;
+ hdev->reset_level = HNAE3_FUNC_RESET;
+ ae_dev->priv = hdev;
+
+ /* HW supports two layers of VLAN tags */
+ hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN;
+
+ mutex_init(&hdev->vport_lock);
+ spin_lock_init(&hdev->fd_rule_lock);
+ sema_init(&hdev->reset_sem, 1);
+
+ ret = hclge_pci_init(hdev);
+ if (ret)
+ goto out;
+
+ /* Firmware command queue initialization */
+ ret = hclge_cmd_queue_init(hdev);
+ if (ret)
+ goto err_pci_uninit;
+
+ /* Firmware command initialization */
+ ret = hclge_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_clear_hw_resource(hdev);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_get_cap(hdev);
+ if (ret)
+ goto err_cmd_uninit;
+
+ ret = hclge_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to query dev specifications, ret = %d.\n",
+ ret);
+ goto err_cmd_uninit;
+ }
+
+ ret = hclge_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
+ goto err_cmd_uninit;
+ }
+
+ ret = hclge_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Init MSI/MSI-X error, ret = %d.\n", ret);
+ goto err_cmd_uninit;
+ }
+
+ ret = hclge_misc_irq_init(hdev);
+ if (ret)
+ goto err_msi_uninit;
+
+ ret = hclge_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Allocate TQPs error, ret = %d.\n", ret);
+ goto err_msi_irq_uninit;
+ }
+
+ ret = hclge_alloc_vport(hdev);
+ if (ret)
+ goto err_msi_irq_uninit;
+
+ ret = hclge_map_tqp(hdev);
+ if (ret)
+ goto err_msi_irq_uninit;
+
+ if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
+ ret = hclge_mac_mdio_config(hdev);
+ if (ret)
+ goto err_msi_irq_uninit;
+ }
+
+ ret = hclge_init_umv_space(hdev);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ ret = hclge_mac_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ goto err_mdiobus_unreg;
+
+ ret = hclge_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_tm_schd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "tm schd init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ hclge_rss_init_cfg(hdev);
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "manager table init fail, ret =%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fd table init fail, ret=%d\n", ret);
+ goto err_mdiobus_unreg;
+ }
+
+ INIT_KFIFO(hdev->mac_tnl_log);
+
+ hclge_dcb_ops_set(hdev);
+
+ timer_setup(&hdev->reset_timer, hclge_reset_timer, 0);
+ INIT_DELAYED_WORK(&hdev->service_task, hclge_service_task);
+
+ /* Set up affinity after the service timer is set up, because
+ * add_timer_on is called in the affinity notify callback.
+ */
+ hclge_misc_affinity_setup(hdev);
+
+ hclge_clear_all_event_cause(hdev);
+ hclge_clear_resetting_state(hdev);
+
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+ /* Request a delayed reset for error recovery, because an immediate
+ * global reset on a PF would disrupt the pending initialization of
+ * other PFs.
+ */
+ if (ae_dev->hw_err_reset_req) {
+ enum hnae3_reset_type reset_level;
+
+ reset_level = hclge_get_reset_level(ae_dev,
+ &ae_dev->hw_err_reset_req);
+ hclge_set_def_reset_request(ae_dev, reset_level);
+ mod_timer(&hdev->reset_timer, jiffies + HCLGE_RESET_INTERVAL);
+ }
+
+ /* Enable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, true);
+
+ hclge_state_init(hdev);
+ hdev->last_reset_time = jiffies;
+
+ dev_info(&hdev->pdev->dev, "%s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
+ hclge_task_schedule(hdev, round_jiffies_relative(HZ));
+
+ return 0;
+
+err_mdiobus_unreg:
+ if (hdev->hw.mac.phydev)
+ mdiobus_unregister(hdev->hw.mac.mdio_bus);
+err_msi_irq_uninit:
+ hclge_misc_irq_uninit(hdev);
+err_msi_uninit:
+ pci_free_irq_vectors(pdev);
+err_cmd_uninit:
+ hclge_cmd_uninit(hdev);
+err_pci_uninit:
+ pcim_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+out:
+ mutex_destroy(&hdev->vport_lock);
+ return ret;
+}
+
+static void hclge_stats_clear(struct hclge_dev *hdev)
+{
+ memset(&hdev->mac_stats, 0, sizeof(hdev->mac_stats));
+}
+
+static int hclge_set_mac_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_config_switch_param(hdev, vf, enable,
+ HCLGE_SWITCH_ANTI_SPOOF_MASK);
+}
+
+static int hclge_set_vlan_spoofchk(struct hclge_dev *hdev, int vf, bool enable)
+{
+ return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+ HCLGE_FILTER_FE_NIC_INGRESS_B,
+ enable, vf);
+}
+
+static int hclge_set_vf_spoofchk_hw(struct hclge_dev *hdev, int vf, bool enable)
+{
+ int ret;
+
+ ret = hclge_set_mac_spoofchk(hdev, vf, enable);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d mac spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+ return ret;
+ }
+
+ ret = hclge_set_vlan_spoofchk(hdev, vf, enable);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set vf %d vlan spoof check %s failed, ret=%d\n",
+ vf, enable ? "on" : "off", ret);
+
+ return ret;
+}
+
+static int hclge_set_vf_spoofchk(struct hnae3_handle *handle, int vf,
+ bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 new_spoofchk = enable ? 1 : 0;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.spoofchk == new_spoofchk)
+ return 0;
+
+ if (enable && test_bit(vport->vport_id, hdev->vf_vlan_full))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d vlan table is full, enabling spoof check may cause its packet send to fail\n",
+ vf);
+ else if (enable && hclge_is_umv_space_full(vport, true))
+ dev_warn(&hdev->pdev->dev,
+ "vf %d mac table is full, enabling spoof check may cause its packet send to fail\n",
+ vf);
+
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id, enable);
+ if (ret)
+ return ret;
+
+ vport->vf_info.spoofchk = new_spoofchk;
+ return 0;
+}
+
+static int hclge_reset_vport_spoofchk(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ int i;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return 0;
+
+ /* resume the vf spoof check state after reset */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_set_vf_spoofchk_hw(hdev, vport->vport_id,
+ vport->vf_info.spoofchk);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_trust(struct hnae3_handle *handle, int vf, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+ u32 new_trusted = enable ? 1 : 0;
+ bool en_bc_pmc;
+ int ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (vport->vf_info.trusted == new_trusted)
+ return 0;
+
+ /* Disable promisc mode for VF if it is not trusted any more. */
+ if (!enable && vport->vf_info.promisc_enable) {
+ en_bc_pmc = ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
+ ret = hclge_set_vport_promisc_mode(vport, false, false,
+ en_bc_pmc);
+ if (ret)
+ return ret;
+ vport->vf_info.promisc_enable = 0;
+ hclge_inform_vf_promisc_info(vport);
+ }
+
+ vport->vf_info.trusted = new_trusted;
+
+ return 0;
+}
+
+static void hclge_reset_vf_rate(struct hclge_dev *hdev)
+{
+ int ret;
+ int vf;
+
+ /* reset vf rate to default value */
+ for (vf = HCLGE_VF_VPORT_START_NUM; vf < hdev->num_alloc_vport; vf++) {
+ struct hclge_vport *vport = &hdev->vport[vf];
+
+ vport->vf_info.max_tx_rate = 0;
+ ret = hclge_tm_qs_shaper_cfg(vport, vport->vf_info.max_tx_rate);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to reset to default, ret=%d\n",
+ vf - HCLGE_VF_VPORT_START_NUM, ret);
+ }
+}
+
+static int hclge_vf_rate_param_check(struct hclge_dev *hdev, int vf,
+ int min_tx_rate, int max_tx_rate)
+{
+ if (min_tx_rate != 0 ||
+ max_tx_rate < 0 || max_tx_rate > hdev->hw.mac.max_speed) {
+ dev_err(&hdev->pdev->dev,
+ "min_tx_rate:%d [0], max_tx_rate:%d [0, %u]\n",
+ min_tx_rate, max_tx_rate, hdev->hw.mac.max_speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int hclge_set_vf_rate(struct hnae3_handle *handle, int vf,
+ int min_tx_rate, int max_tx_rate, bool force)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+
+ ret = hclge_vf_rate_param_check(hdev, vf, min_tx_rate, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ if (!force && max_tx_rate == vport->vf_info.max_tx_rate)
+ return 0;
+
+ ret = hclge_tm_qs_shaper_cfg(vport, max_tx_rate);
+ if (ret)
+ return ret;
+
+ vport->vf_info.max_tx_rate = max_tx_rate;
+
+ return 0;
+}
+
+static int hclge_resume_vf_rate(struct hclge_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->vport->nic;
+ struct hclge_vport *vport;
+ int ret;
+ int vf;
+
+ /* resume the vf max_tx_rate after reset */
+ for (vf = 0; vf < pci_num_vf(hdev->pdev); vf++) {
+ vport = hclge_get_vf_vport(hdev, vf);
+ if (!vport)
+ return -EINVAL;
+
+ /* Zero means the maximum rate; after a reset the firmware has
+ * already set it to the maximum, so just continue.
+ */
+ if (!vport->vf_info.max_tx_rate)
+ continue;
+
+ ret = hclge_set_vf_rate(handle, vf, 0,
+ vport->vf_info.max_tx_rate, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%d failed to resume tx_rate:%u, ret=%d\n",
+ vf, vport->vf_info.max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclge_reset_vport_state(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_vport_stop(vport);
+ vport++;
+ }
+}
+
+static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct pci_dev *pdev = ae_dev->pdev;
+ int ret;
+
+ set_bit(HCLGE_STATE_DOWN, &hdev->state);
+
+ hclge_stats_clear(hdev);
+ /* NOTE: a PF reset does not need to clear or restore the PF and VF
+ * table entries, so do not clean the tables in memory here.
+ */
+ if (hdev->reset_type == HNAE3_IMP_RESET ||
+ hdev->reset_type == HNAE3_GLOBAL_RESET) {
+ memset(hdev->vlan_table, 0, sizeof(hdev->vlan_table));
+ memset(hdev->vf_vlan_full, 0, sizeof(hdev->vf_vlan_full));
+ bitmap_set(hdev->vport_config_block, 0, hdev->num_alloc_vport);
+ hclge_reset_umv_space(hdev);
+ }
+
+ ret = hclge_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cmd queue init failed\n");
+ return ret;
+ }
+
+ ret = hclge_map_tqp(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_mac_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Mac init error, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_tso(hdev, HCLGE_TSO_MSS_MIN, HCLGE_TSO_MSS_MAX);
+ if (ret) {
+ dev_err(&pdev->dev, "Enable tso fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclge_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_tm_init_hw(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev, "tm init hw fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Rss init fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to reinit manager table, ret = %d\n", ret);
+ return ret;
+ }
+
+ ret = hclge_init_fd_config(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
+ return ret;
+ }
+
+ /* Log and clear the hw errors that have already occurred */
+ hclge_handle_all_hns_hw_errors(ae_dev);
+
+ /* Re-enable the hw error interrupts because
+ * the interrupts get disabled on global reset.
+ */
+ ret = hclge_config_nic_hw_error(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable NIC hw error interrupts\n",
+ ret);
+ return ret;
+ }
+
+ if (hdev->roce_client) {
+ ret = hclge_config_rocee_ras_interrupt(hdev, true);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "fail(%d) to re-enable roce ras interrupts\n",
+ ret);
+ return ret;
+ }
+ }
+
+ hclge_reset_vport_state(hdev);
+ ret = hclge_reset_vport_spoofchk(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_resume_vf_rate(hdev);
+ if (ret)
+ return ret;
+
+ dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n",
+ HCLGE_DRIVER_NAME);
+
+ return 0;
+}
+
+static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclge_dev *hdev = ae_dev->priv;
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ hclge_reset_vf_rate(hdev);
+ hclge_clear_vf_vlan(hdev);
+ hclge_misc_affinity_teardown(hdev);
+ hclge_state_uninit(hdev);
+ hclge_uninit_mac_table(hdev);
+
+ if (mac->phydev)
+ mdiobus_unregister(mac->mdio_bus);
+
+ /* Disable MISC vector(vector0) */
+ hclge_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+
+ /* Disable all hw interrupts */
+ hclge_config_mac_tnl_int(hdev, false);
+ hclge_config_nic_hw_error(hdev, false);
+ hclge_config_rocee_ras_interrupt(hdev, false);
+
+ hclge_cmd_uninit(hdev);
+ hclge_misc_irq_uninit(hdev);
+ hclge_pci_uninit(hdev);
+ mutex_destroy(&hdev->vport_lock);
+ hclge_uninit_vport_vlan_table(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclge_get_max_channels(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return min_t(u32, hdev->rss_size_max,
+ vport->alloc_tqps / kinfo->num_tc);
+}
+
+static void hclge_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ ch->max_combined = hclge_get_max_channels(handle);
+ ch->other_count = 1;
+ ch->max_other = 1;
+ ch->combined_count = handle->kinfo.rss_size;
+}
+
+static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *alloc_tqps, u16 *max_rss_size)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ *alloc_tqps = vport->alloc_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+ struct hclge_dev *hdev = vport->back;
+ u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
+ u16 tc_valid[HCLGE_MAX_TC_NUM];
+ u16 roundup_size;
+ u32 *rss_indir;
+ unsigned int i;
+ int ret;
+
+ kinfo->req_rss_size = new_tqps_num;
+
+ ret = hclge_tm_vport_map_update(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "tm vport map fail, ret =%d\n", ret);
+ return ret;
+ }
+
+ roundup_size = roundup_pow_of_two(kinfo->rss_size);
+ roundup_size = ilog2(roundup_size);
+ /* Set the RSS TC mode according to the new RSS size */
+ for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+ tc_valid[i] = 0;
+
+ if (!(hdev->hw_tc_map & BIT(i)))
+ continue;
+
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = kinfo->rss_size * i;
+ }
+ ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+ if (ret)
+ return ret;
+
+ /* RSS indirection table has been configured by the user */
+ if (rxfh_configured)
+ goto out;
+
+ /* Reinitialize the RSS indirection table according to the new RSS size */
+ rss_indir = kcalloc(HCLGE_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGE_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ ret = hclge_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+out:
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
+
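+/* Query the firmware for the number of 32-bit and 64-bit registers available
+ * for the register dump.
+ */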
+static int hclge_get_regs_num(struct hclge_dev *hdev, u32 *regs_num_32_bit,
+ u32 *regs_num_64_bit)
+{
+ struct hclge_desc desc;
+ u32 total_num;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_REG_NUM, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query register number cmd failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ *regs_num_32_bit = le32_to_cpu(desc.data[0]);
+ *regs_num_64_bit = le32_to_cpu(desc.data[1]);
+
+ total_num = *regs_num_32_bit + *regs_num_64_bit;
+ if (!total_num)
+ return -EINVAL;
+
+ return 0;
+}
+
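+/* Read regs_num 32-bit registers from the firmware using a chain of command
+ * descriptors and copy them into the caller's buffer.
+ */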
+static int hclge_get_32_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_32_BIT_REG_RTN_DATANUM 8
+#define HCLGE_32_BIT_DESC_NODATA_LEN 2
+
+ struct hclge_desc *desc;
+ u32 *reg_val = data;
+ __le32 *desc_data;
+ int nodata_num;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_num = HCLGE_32_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_num,
+ HCLGE_32_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_32_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 32 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le32 *)(&desc[i].data[0]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM - nodata_num;
+ } else {
+ desc_data = (__le32 *)(&desc[i]);
+ n = HCLGE_32_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le32_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num,
+ void *data)
+{
+#define HCLGE_64_BIT_REG_RTN_DATANUM 4
+#define HCLGE_64_BIT_DESC_NODATA_LEN 1
+
+ struct hclge_desc *desc;
+ u64 *reg_val = data;
+ __le64 *desc_data;
+ int nodata_len;
+ int cmd_num;
+ int i, k, n;
+ int ret;
+
+ if (regs_num == 0)
+ return 0;
+
+ nodata_len = HCLGE_64_BIT_DESC_NODATA_LEN;
+ cmd_num = DIV_ROUND_UP(regs_num + nodata_len,
+ HCLGE_64_BIT_REG_RTN_DATANUM);
+ desc = kcalloc(cmd_num, sizeof(struct hclge_desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_QUERY_64_BIT_REG, true);
+ ret = hclge_cmd_send(&hdev->hw, desc, cmd_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Query 64 bit register cmd failed, ret = %d.\n", ret);
+ kfree(desc);
+ return ret;
+ }
+
+ for (i = 0; i < cmd_num; i++) {
+ if (i == 0) {
+ desc_data = (__le64 *)(&desc[i].data[0]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM - nodata_len;
+ } else {
+ desc_data = (__le64 *)(&desc[i]);
+ n = HCLGE_64_BIT_REG_RTN_DATANUM;
+ }
+ for (k = 0; k < n; k++) {
+ *reg_val++ = le64_to_cpu(*desc_data++);
+
+ regs_num--;
+ if (!regs_num)
+ break;
+ }
+ }
+
+ kfree(desc);
+ return 0;
+}
+
+#define MAX_SEPARATE_NUM 4
+#define SEPARATOR_VALUE 0xFDFCFBFA
+#define REG_NUM_PER_LINE 4
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+#define REG_SEPARATOR_LINE 1
+#define REG_NUM_REMAIN_MASK 3
+
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev, struct hclge_desc *desc)
+{
+ int i;
+
+ /* initialize all command BDs except the last one */
+ for (i = 0; i < HCLGE_GET_DFX_REG_TYPE_CNT - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM,
+ true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* initialize the last command BD */
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_DFX_BD_NUM, true);
+
+ return hclge_cmd_send(&hdev->hw, desc, HCLGE_GET_DFX_REG_TYPE_CNT);
+}
+
+static int hclge_get_dfx_reg_bd_num(struct hclge_dev *hdev,
+ int *bd_num_list,
+ u32 type_num)
+{
+ u32 entries_per_desc, desc_index, index, offset, i;
+ struct hclge_desc desc[HCLGE_GET_DFX_REG_TYPE_CNT];
+ int ret;
+
+ ret = hclge_query_bd_num_cmd_send(hdev, desc);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx bd num fail, status is %d.\n", ret);
+ return ret;
+ }
+
+ entries_per_desc = ARRAY_SIZE(desc[0].data);
+ for (i = 0; i < type_num; i++) {
+ offset = hclge_dfx_bd_offset_list[i];
+ index = offset % entries_per_desc;
+ desc_index = offset / entries_per_desc;
+ bd_num_list[i] = le32_to_cpu(desc[desc_index].data[index]);
+ }
+
+ return ret;
+}
+
+static int hclge_dfx_reg_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc_src, int bd_num,
+ enum hclge_opcode_type cmd)
+{
+ struct hclge_desc *desc = desc_src;
+ int i, ret;
+
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ for (i = 0; i < bd_num - 1; i++) {
+ desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ desc++;
+ hclge_cmd_setup_basic_desc(desc, cmd, true);
+ }
+
+ desc = desc_src;
+ ret = hclge_cmd_send(&hdev->hw, desc, bd_num);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Query dfx reg cmd(0x%x) send fail, status is %d.\n",
+ cmd, ret);
+
+ return ret;
+}
+
+static int hclge_dfx_reg_fetch_data(struct hclge_desc *desc_src, int bd_num,
+ void *data)
+{
+ int entries_per_desc, reg_num, separator_num, desc_index, index, i;
+ struct hclge_desc *desc = desc_src;
+ u32 *reg = data;
+
+ entries_per_desc = ARRAY_SIZE(desc->data);
+ reg_num = entries_per_desc * bd_num;
+ separator_num = REG_NUM_PER_LINE - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++) {
+ index = i % entries_per_desc;
+ desc_index = i / entries_per_desc;
+ *reg++ = le32_to_cpu(desc[desc_index].data[index]);
+ }
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ return reg_num + separator_num;
+}
+
+static int hclge_get_dfx_reg_len(struct hclge_dev *hdev, int *len)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int data_len_per_desc, bd_num, i;
+ int *bd_num_list;
+ u32 data_len;
+ int ret;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ data_len_per_desc = sizeof_field(struct hclge_desc, data);
+ *len = 0;
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ data_len = data_len_per_desc * bd_num;
+ *len += (data_len / REG_LEN_PER_LINE + 1) * REG_LEN_PER_LINE;
+ }
+
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
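+/* Dump all DFX register groups: query the BD count for each group, issue the
+ * corresponding query commands and append the register values (plus
+ * separators) to the output buffer.
+ */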
+static int hclge_get_dfx_reg(struct hclge_dev *hdev, void *data)
+{
+ u32 dfx_reg_type_num = ARRAY_SIZE(hclge_dfx_bd_offset_list);
+ int bd_num, bd_num_max, buf_len, i;
+ struct hclge_desc *desc_src;
+ int *bd_num_list;
+ u32 *reg = data;
+ int ret;
+
+ bd_num_list = kcalloc(dfx_reg_type_num, sizeof(int), GFP_KERNEL);
+ if (!bd_num_list)
+ return -ENOMEM;
+
+ ret = hclge_get_dfx_reg_bd_num(hdev, bd_num_list, dfx_reg_type_num);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg bd num fail, status is %d.\n", ret);
+ goto out;
+ }
+
+ bd_num_max = bd_num_list[0];
+ for (i = 1; i < dfx_reg_type_num; i++)
+ bd_num_max = max_t(int, bd_num_max, bd_num_list[i]);
+
+ buf_len = sizeof(*desc_src) * bd_num_max;
+ desc_src = kzalloc(buf_len, GFP_KERNEL);
+ if (!desc_src) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < dfx_reg_type_num; i++) {
+ bd_num = bd_num_list[i];
+ ret = hclge_dfx_reg_cmd_send(hdev, desc_src, bd_num,
+ hclge_dfx_reg_opcode_list[i]);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg fail, status is %d.\n", ret);
+ break;
+ }
+
+ reg += hclge_dfx_reg_fetch_data(desc_src, bd_num, reg);
+ }
+
+ kfree(desc_src);
+out:
+ kfree(bd_num_list);
+ return ret;
+}
+
+static int hclge_fetch_pf_reg(struct hclge_dev *hdev, void *data,
+ struct hnae3_knic_private_info *kinfo)
+{
+#define HCLGE_RING_REG_OFFSET 0x200
+#define HCLGE_RING_INT_REG_OFFSET 0x4
+
+ int i, j, reg_num, separator_num;
+ int data_num_sum;
+ u32 *reg = data;
+
+ /* fetch per-PF register values from the PF PCIe register space */
+ reg_num = ARRAY_SIZE(cmdq_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ data_num_sum = reg_num + separator_num;
+
+ reg_num = ARRAY_SIZE(common_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ data_num_sum += reg_num + separator_num;
+
+ reg_num = ARRAY_SIZE(ring_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (j = 0; j < kinfo->num_tqps; j++) {
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ HCLGE_RING_REG_OFFSET * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+ data_num_sum += (reg_num + separator_num) * kinfo->num_tqps;
+
+ reg_num = ARRAY_SIZE(tqp_intr_reg_addr_list);
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ for (i = 0; i < reg_num; i++)
+ *reg++ = hclge_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ HCLGE_RING_INT_REG_OFFSET * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+ data_num_sum += (reg_num + separator_num) * (hdev->num_msi_used - 1);
+
+ return data_num_sum;
+}
+
+static int hclge_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ int regs_num_32_bit, regs_num_64_bit, dfx_regs_len;
+ int regs_lines_32_bit, regs_lines_64_bit;
+ int ret;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ ret = hclge_get_dfx_reg_len(hdev, &dfx_regs_len);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get dfx reg len failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_32_bit = regs_num_32_bit * sizeof(u32) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+ regs_lines_64_bit = regs_num_64_bit * sizeof(u64) / REG_LEN_PER_LINE +
+ REG_SEPARATOR_LINE;
+
+ return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1) + regs_lines_32_bit +
+ regs_lines_64_bit) * REG_LEN_PER_LINE + dfx_regs_len;
+}
+
+static void hclge_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 regs_num_32_bit, regs_num_64_bit;
+ int i, reg_num, separator_num, ret;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ ret = hclge_get_regs_num(hdev, &regs_num_32_bit, &regs_num_64_bit);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get register number failed, ret = %d.\n", ret);
+ return;
+ }
+
+ reg += hclge_fetch_pf_reg(hdev, reg, kinfo);
+
+ ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 32 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_32_bit;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Get 64 bit register failed, ret = %d.\n", ret);
+ return;
+ }
+ reg_num = regs_num_64_bit * 2;
+ reg += reg_num;
+ separator_num = MAX_SEPARATE_NUM - (reg_num & REG_NUM_REMAIN_MASK);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ ret = hclge_get_dfx_reg(hdev, reg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Get dfx register failed, ret = %d.\n", ret);
+}
+
+static int hclge_set_led_status(struct hclge_dev *hdev, u8 locate_led_status)
+{
+ struct hclge_set_led_state_cmd *req;
+ struct hclge_desc desc;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_LED_STATUS_CFG, false);
+
+ req = (struct hclge_set_led_state_cmd *)desc.data;
+ hnae3_set_field(req->locate_led_config, HCLGE_LED_LOCATE_STATE_M,
+ HCLGE_LED_LOCATE_STATE_S, locate_led_status);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Send set led state cmd error, ret =%d\n", ret);
+
+ return ret;
+}
+
+enum hclge_led_status {
+ HCLGE_LED_OFF,
+ HCLGE_LED_ON,
+ HCLGE_LED_NO_CHANGE = 0xFF,
+};
+
+static int hclge_set_led_id(struct hnae3_handle *handle,
+ enum ethtool_phys_id_state status)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ switch (status) {
+ case ETHTOOL_ID_ACTIVE:
+ return hclge_set_led_status(hdev, HCLGE_LED_ON);
+ case ETHTOOL_ID_INACTIVE:
+ return hclge_set_led_status(hdev, HCLGE_LED_OFF);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void hclge_get_link_mode(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising)
+{
+ unsigned int size = BITS_TO_LONGS(__ETHTOOL_LINK_MODE_MASK_NBITS);
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ unsigned int idx = 0;
+
+ for (; idx < size; idx++) {
+ supported[idx] = hdev->hw.mac.supported[idx];
+ advertising[idx] = hdev->hw.mac.advertising[idx];
+ }
+}
+
+static int hclge_gro_en(struct hnae3_handle *handle, bool enable)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+
+ return hclge_config_gro(hdev, enable);
+}
+
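+/* Re-apply the PF vport's promiscuous mode and VLAN filter settings whenever
+ * the requested flags (including the overflow promisc flags) have changed.
+ */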
+static void hclge_sync_promisc_mode(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = &hdev->vport[0];
+ struct hnae3_handle *handle = &vport->nic;
+ u8 tmp_flags;
+ int ret;
+
+ if (vport->last_promisc_flags != vport->overflow_promisc_flags) {
+ set_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ vport->last_promisc_flags = vport->overflow_promisc_flags;
+ }
+
+ if (test_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state)) {
+ tmp_flags = handle->netdev_flags | vport->last_promisc_flags;
+ ret = hclge_set_promisc_mode(handle, tmp_flags & HNAE3_UPE,
+ tmp_flags & HNAE3_MPE);
+ if (!ret) {
+ clear_bit(HCLGE_STATE_PROMISC_CHANGED, &hdev->state);
+ hclge_enable_vlan_filter(handle,
+ tmp_flags & HNAE3_VLAN_FLTR);
+ }
+ }
+}
+
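+/* Ask the firmware whether an SFP module is currently present. */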
+static bool hclge_module_existed(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u32 existed;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GET_SFP_EXIST, true);
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP exist state, ret = %d\n", ret);
+ return false;
+ }
+
+ existed = le32_to_cpu(desc.data[0]);
+
+ return existed != 0;
+}
+
+/* One read needs 6 BDs (140 bytes in total).
+ * Return the number of bytes actually read; 0 means the read failed.
+ */
+static u16 hclge_get_sfp_eeprom_info(struct hclge_dev *hdev, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_desc desc[HCLGE_SFP_INFO_CMD_NUM];
+ struct hclge_sfp_info_bd0_cmd *sfp_info_bd0;
+ u16 read_len;
+ u16 copy_len;
+ int ret;
+ int i;
+
+ /* setup all 6 bds to read module eeprom info. */
+ for (i = 0; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], HCLGE_OPC_GET_SFP_EEPROM,
+ true);
+
+ /* bd0~bd4 need next flag */
+ if (i < HCLGE_SFP_INFO_CMD_NUM - 1)
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ /* set up bd0; this bd contains the offset and read length. */
+ sfp_info_bd0 = (struct hclge_sfp_info_bd0_cmd *)desc[0].data;
+ sfp_info_bd0->offset = cpu_to_le16((u16)offset);
+ read_len = min_t(u16, len, HCLGE_SFP_INFO_MAX_LEN);
+ sfp_info_bd0->read_len = cpu_to_le16(read_len);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, i);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to get SFP eeprom info, ret = %d\n", ret);
+ return 0;
+ }
+
+ /* copy sfp info from bd0 to out buffer. */
+ copy_len = min_t(u16, len, HCLGE_SFP_INFO_BD0_LEN);
+ memcpy(data, sfp_info_bd0->data, copy_len);
+ read_len = copy_len;
+
+ /* copy sfp info from bd1~bd5 to out buffer if needed. */
+ for (i = 1; i < HCLGE_SFP_INFO_CMD_NUM; i++) {
+ if (read_len >= len)
+ return read_len;
+
+ copy_len = min_t(u16, len - read_len, HCLGE_SFP_INFO_BDX_LEN);
+ memcpy(data + read_len, desc[i].data, copy_len);
+ read_len += copy_len;
+ }
+
+ return read_len;
+}
+
+static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset,
+ u32 len, u8 *data)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ u32 read_len = 0;
+ u16 data_len;
+
+ if (hdev->hw.mac.media_type != HNAE3_MEDIA_TYPE_FIBER)
+ return -EOPNOTSUPP;
+
+ if (!hclge_module_existed(hdev))
+ return -ENXIO;
+
+ while (read_len < len) {
+ data_len = hclge_get_sfp_eeprom_info(hdev,
+ offset + read_len,
+ len - read_len,
+ data + read_len);
+ if (!data_len)
+ return -EIO;
+
+ read_len += data_len;
+ }
+
+ return 0;
+}
+
+static const struct hnae3_ae_ops hclge_ops = {
+ .init_ae_dev = hclge_init_ae_dev,
+ .uninit_ae_dev = hclge_uninit_ae_dev,
+ .flr_prepare = hclge_flr_prepare,
+ .flr_done = hclge_flr_done,
+ .init_client_instance = hclge_init_client_instance,
+ .uninit_client_instance = hclge_uninit_client_instance,
+ .map_ring_to_vector = hclge_map_ring_to_vector,
+ .unmap_ring_from_vector = hclge_unmap_ring_frm_vector,
+ .get_vector = hclge_get_vector,
+ .put_vector = hclge_put_vector,
+ .set_promisc_mode = hclge_set_promisc_mode,
+ .request_update_promisc_mode = hclge_request_update_promisc_mode,
+ .set_loopback = hclge_set_loopback,
+ .start = hclge_ae_start,
+ .stop = hclge_ae_stop,
+ .client_start = hclge_client_start,
+ .client_stop = hclge_client_stop,
+ .get_status = hclge_get_status,
+ .get_ksettings_an_result = hclge_get_ksettings_an_result,
+ .cfg_mac_speed_dup_h = hclge_cfg_mac_speed_dup_h,
+ .get_media_type = hclge_get_media_type,
+ .check_port_speed = hclge_check_port_speed,
+ .get_fec = hclge_get_fec,
+ .set_fec = hclge_set_fec,
+ .get_rss_key_size = hclge_get_rss_key_size,
+ .get_rss_indir_size = hclge_get_rss_indir_size,
+ .get_rss = hclge_get_rss,
+ .set_rss = hclge_set_rss,
+ .set_rss_tuple = hclge_set_rss_tuple,
+ .get_rss_tuple = hclge_get_rss_tuple,
+ .get_tc_size = hclge_get_tc_size,
+ .get_mac_addr = hclge_get_mac_addr,
+ .set_mac_addr = hclge_set_mac_addr,
+ .do_ioctl = hclge_do_ioctl,
+ .add_uc_addr = hclge_add_uc_addr,
+ .rm_uc_addr = hclge_rm_uc_addr,
+ .add_mc_addr = hclge_add_mc_addr,
+ .rm_mc_addr = hclge_rm_mc_addr,
+ .set_autoneg = hclge_set_autoneg,
+ .get_autoneg = hclge_get_autoneg,
+ .restart_autoneg = hclge_restart_autoneg,
+ .halt_autoneg = hclge_halt_autoneg,
+ .get_pauseparam = hclge_get_pauseparam,
+ .set_pauseparam = hclge_set_pauseparam,
+ .set_mtu = hclge_set_mtu,
+ .reset_queue = hclge_reset_tqp,
+ .get_stats = hclge_get_stats,
+ .get_mac_stats = hclge_get_mac_stat,
+ .update_stats = hclge_update_stats,
+ .get_strings = hclge_get_strings,
+ .get_sset_count = hclge_get_sset_count,
+ .get_fw_version = hclge_get_fw_version,
+ .get_mdix_mode = hclge_get_mdix_mode,
+ .enable_vlan_filter = hclge_enable_vlan_filter,
+ .set_vlan_filter = hclge_set_vlan_filter,
+ .set_vf_vlan_filter = hclge_set_vf_vlan_filter,
+ .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag,
+ .reset_event = hclge_reset_event,
+ .get_reset_level = hclge_get_reset_level,
+ .set_default_reset_request = hclge_set_def_reset_request,
+ .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info,
+ .set_channels = hclge_set_channels,
+ .get_channels = hclge_get_channels,
+ .get_regs_len = hclge_get_regs_len,
+ .get_regs = hclge_get_regs,
+ .set_led_id = hclge_set_led_id,
+ .get_link_mode = hclge_get_link_mode,
+ .add_fd_entry = hclge_add_fd_entry,
+ .del_fd_entry = hclge_del_fd_entry,
+ .del_all_fd_entries = hclge_del_all_fd_entries,
+ .get_fd_rule_cnt = hclge_get_fd_rule_cnt,
+ .get_fd_rule_info = hclge_get_fd_rule_info,
+ .get_fd_all_rules = hclge_get_all_rules,
+ .enable_fd = hclge_enable_fd,
+ .add_arfs_entry = hclge_add_fd_entry_by_arfs,
+ .dbg_run_cmd = hclge_dbg_run_cmd,
+ .handle_hw_ras_error = hclge_handle_hw_ras_error,
+ .get_hw_reset_stat = hclge_get_hw_reset_stat,
+ .ae_dev_resetting = hclge_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
+ .set_gro_en = hclge_gro_en,
+ .get_global_queue_id = hclge_covert_handle_qid_global,
+ .set_timer_task = hclge_set_timer_task,
+ .mac_connect_phy = hclge_mac_connect_phy,
+ .mac_disconnect_phy = hclge_mac_disconnect_phy,
+ .get_vf_config = hclge_get_vf_config,
+ .set_vf_link_state = hclge_set_vf_link_state,
+ .set_vf_spoofchk = hclge_set_vf_spoofchk,
+ .set_vf_trust = hclge_set_vf_trust,
+ .set_vf_rate = hclge_set_vf_rate,
+ .set_vf_mac = hclge_set_vf_mac,
+ .get_module_eeprom = hclge_get_module_eeprom,
+ .get_cmdq_stat = hclge_get_cmdq_stat,
+};
+
+static struct hnae3_ae_algo ae_algo = {
+ .ops = &hclge_ops,
+ .pdev_id_table = ae_algo_pci_tbl,
+};
+
+static int hclge_init(void)
+{
+ pr_info("%s is initializing\n", HCLGE_NAME);
+
+ hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME);
+ if (!hclge_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
+ return -ENOMEM;
+ }
+
+ hnae3_register_ae_algo(&ae_algo);
+
+ return 0;
+}
+
+static void hclge_exit(void)
+{
+ hnae3_unregister_ae_algo_prepare(&ae_algo);
+ hnae3_unregister_ae_algo(&ae_algo);
+ destroy_workqueue(hclge_wq);
+}
+module_init(hclge_init);
+module_exit(hclge_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGE Driver");
+MODULE_VERSION(HCLGE_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
new file mode 100644
index 000000000..213ac73f9
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h
@@ -0,0 +1,1017 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_MAIN_H
+#define __HCLGE_MAIN_H
+#include <linux/fs.h>
+#include <linux/types.h>
+#include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/kfifo.h>
+
+#include "hclge_cmd.h"
+#include "hnae3.h"
+
+#define HCLGE_MOD_VERSION "1.0"
+#define HCLGE_DRIVER_NAME "hclge"
+
+#define HCLGE_MAX_PF_NUM 8
+
+#define HCLGE_RD_FIRST_STATS_NUM 2
+#define HCLGE_RD_OTHER_STATS_NUM 4
+
+#define HCLGE_INVALID_VPORT 0xffff
+
+#define HCLGE_PF_CFG_BLOCK_SIZE 32
+#define HCLGE_PF_CFG_DESC_NUM \
+ (HCLGE_PF_CFG_BLOCK_SIZE / HCLGE_CFG_RD_LEN_BYTES)
+
+#define HCLGE_VECTOR_REG_BASE 0x20000
+#define HCLGE_MISC_VECTOR_REG_BASE 0x20400
+
+#define HCLGE_VECTOR_REG_OFFSET 0x4
+#define HCLGE_VECTOR_VF_OFFSET 0x100000
+
+#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGE_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGE_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGE_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGE_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGE_CMDQ_INTR_SRC_REG 0x27100
+#define HCLGE_CMDQ_INTR_STS_REG 0x27104
+#define HCLGE_CMDQ_INTR_EN_REG 0x27108
+#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGE_VECTOR0_OTER_EN_REG 0x20600
+#define HCLGE_RAS_OTHER_STS_REG 0x20B00
+#define HCLGE_FUNC_RESET_STS_REG 0x20C00
+#define HCLGE_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGE_RING_RX_ADDR_L_REG 0x80000
+#define HCLGE_RING_RX_ADDR_H_REG 0x80004
+#define HCLGE_RING_RX_BD_NUM_REG 0x80008
+#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGE_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGE_RING_RX_TAIL_REG 0x80018
+#define HCLGE_RING_RX_HEAD_REG 0x8001C
+#define HCLGE_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGE_RING_RX_OFFSET_REG 0x80024
+#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGE_RING_RX_STASH_REG 0x80030
+#define HCLGE_RING_RX_BD_ERR_REG 0x80034
+#define HCLGE_RING_TX_ADDR_L_REG 0x80040
+#define HCLGE_RING_TX_ADDR_H_REG 0x80044
+#define HCLGE_RING_TX_BD_NUM_REG 0x80048
+#define HCLGE_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGE_RING_TX_TC_REG 0x80050
+#define HCLGE_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGE_RING_TX_TAIL_REG 0x80058
+#define HCLGE_RING_TX_HEAD_REG 0x8005C
+#define HCLGE_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGE_RING_TX_OFFSET_REG 0x80064
+#define HCLGE_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGE_RING_TX_BD_ERR_REG 0x80074
+#define HCLGE_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGE_TQP_INTR_CTRL_REG 0x20000
+#define HCLGE_TQP_INTR_GL0_REG 0x20100
+#define HCLGE_TQP_INTR_GL1_REG 0x20200
+#define HCLGE_TQP_INTR_GL2_REG 0x20300
+#define HCLGE_TQP_INTR_RL_REG 0x20900
+
+#define HCLGE_RSS_IND_TBL_SIZE 512
+#define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0)
+#define HCLGE_RSS_KEY_SIZE 40
+#define HCLGE_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGE_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGE_RSS_HASH_ALGO_SYMMETRIC 2
+#define HCLGE_RSS_HASH_ALGO_MASK GENMASK(3, 0)
+#define HCLGE_RSS_CFG_TBL_NUM \
+ (HCLGE_RSS_IND_TBL_SIZE / HCLGE_RSS_CFG_TBL_SIZE)
+
+#define HCLGE_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGE_D_PORT_BIT BIT(0)
+#define HCLGE_S_PORT_BIT BIT(1)
+#define HCLGE_D_IP_BIT BIT(2)
+#define HCLGE_S_IP_BIT BIT(3)
+#define HCLGE_V_TAG_BIT BIT(4)
+#define HCLGE_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGE_D_IP_BIT | HCLGE_S_IP_BIT | HCLGE_V_TAG_BIT)
+
+#define HCLGE_RSS_TC_SIZE_0 1
+#define HCLGE_RSS_TC_SIZE_1 2
+#define HCLGE_RSS_TC_SIZE_2 4
+#define HCLGE_RSS_TC_SIZE_3 8
+#define HCLGE_RSS_TC_SIZE_4 16
+#define HCLGE_RSS_TC_SIZE_5 32
+#define HCLGE_RSS_TC_SIZE_6 64
+#define HCLGE_RSS_TC_SIZE_7 128
+
+#define HCLGE_UMV_TBL_SIZE 3072
+#define HCLGE_DEFAULT_UMV_SPACE_PER_PF \
+ (HCLGE_UMV_TBL_SIZE / HCLGE_MAX_PF_NUM)
+
+#define HCLGE_TQP_RESET_TRY_TIMES 200
+
+#define HCLGE_PHY_PAGE_MDIX 0
+#define HCLGE_PHY_PAGE_COPPER 0
+
+/* Page Selection Reg. */
+#define HCLGE_PHY_PAGE_REG 22
+
+/* Copper Specific Control Register */
+#define HCLGE_PHY_CSC_REG 16
+
+/* Copper Specific Status Register */
+#define HCLGE_PHY_CSS_REG 17
+
+#define HCLGE_PHY_MDIX_CTRL_S 5
+#define HCLGE_PHY_MDIX_CTRL_M GENMASK(6, 5)
+
+#define HCLGE_PHY_MDIX_STATUS_B 6
+#define HCLGE_PHY_SPEED_DUP_RESOLVE_B 11
+
+#define HCLGE_GET_DFX_REG_TYPE_CNT 4
+
+/* Factor used to calculate offset and bitmap of VF num */
+#define HCLGE_VF_NUM_PER_CMD 64
+
+enum HLCGE_PORT_TYPE {
+ HOST_PORT,
+ NETWORK_PORT
+};
+
+#define PF_VPORT_ID 0
+
+#define HCLGE_PF_ID_S 0
+#define HCLGE_PF_ID_M GENMASK(2, 0)
+#define HCLGE_VF_ID_S 3
+#define HCLGE_VF_ID_M GENMASK(10, 3)
+#define HCLGE_PORT_TYPE_B 11
+#define HCLGE_NETWORK_PORT_ID_S 0
+#define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0)
+
+/* Reset related Registers */
+#define HCLGE_PF_OTHER_INT_REG 0x20600
+#define HCLGE_MISC_RESET_STS_REG 0x20700
+#define HCLGE_MISC_VECTOR_INT_STS 0x20800
+#define HCLGE_GLOBAL_RESET_REG 0x20A00
+#define HCLGE_GLOBAL_RESET_BIT 0
+#define HCLGE_CORE_RESET_BIT 1
+#define HCLGE_IMP_RESET_BIT 2
+#define HCLGE_RESET_INT_M GENMASK(7, 5)
+#define HCLGE_FUN_RST_ING 0x20C00
+#define HCLGE_FUN_RST_ING_B 0
+
+/* Vector0 register bits define */
+#define HCLGE_VECTOR0_GLOBALRESET_INT_B 5
+#define HCLGE_VECTOR0_CORERESET_INT_B 6
+#define HCLGE_VECTOR0_IMPRESET_INT_B 7
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGE_VECTOR0_CMDQ_SRC_REG 0x27100
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGE_VECTOR0_RX_CMDQ_INT_B 1
+
+#define HCLGE_VECTOR0_IMP_RESET_INT_B 1
+#define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U
+#define HCLGE_VECTOR0_IMP_RD_POISON_B 5U
+
+#define HCLGE_MAC_DEFAULT_FRAME \
+ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN)
+#define HCLGE_MAC_MIN_FRAME 64
+#define HCLGE_MAC_MAX_FRAME 9728
+
+#define HCLGE_SUPPORT_1G_BIT BIT(0)
+#define HCLGE_SUPPORT_10G_BIT BIT(1)
+#define HCLGE_SUPPORT_25G_BIT BIT(2)
+#define HCLGE_SUPPORT_50G_BIT BIT(3)
+#define HCLGE_SUPPORT_100G_BIT BIT(4)
+/* to be compatible with existing boards */
+#define HCLGE_SUPPORT_40G_BIT BIT(5)
+#define HCLGE_SUPPORT_100M_BIT BIT(6)
+#define HCLGE_SUPPORT_10M_BIT BIT(7)
+#define HCLGE_SUPPORT_200G_BIT BIT(8)
+#define HCLGE_SUPPORT_GE \
+ (HCLGE_SUPPORT_1G_BIT | HCLGE_SUPPORT_100M_BIT | HCLGE_SUPPORT_10M_BIT)
+
+enum HCLGE_DEV_STATE {
+ HCLGE_STATE_REINITING,
+ HCLGE_STATE_DOWN,
+ HCLGE_STATE_DISABLED,
+ HCLGE_STATE_REMOVING,
+ HCLGE_STATE_NIC_REGISTERED,
+ HCLGE_STATE_ROCE_REGISTERED,
+ HCLGE_STATE_SERVICE_INITED,
+ HCLGE_STATE_RST_SERVICE_SCHED,
+ HCLGE_STATE_RST_HANDLING,
+ HCLGE_STATE_MBX_SERVICE_SCHED,
+ HCLGE_STATE_MBX_HANDLING,
+ HCLGE_STATE_STATISTICS_UPDATING,
+ HCLGE_STATE_CMD_DISABLE,
+ HCLGE_STATE_LINK_UPDATING,
+ HCLGE_STATE_PROMISC_CHANGED,
+ HCLGE_STATE_RST_FAIL,
+ HCLGE_STATE_MAX
+};
+
+enum hclge_evt_cause {
+ HCLGE_VECTOR0_EVENT_RST,
+ HCLGE_VECTOR0_EVENT_MBX,
+ HCLGE_VECTOR0_EVENT_ERR,
+ HCLGE_VECTOR0_EVENT_OTHER,
+};
+
+enum HCLGE_MAC_SPEED {
+ HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */
+ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */
+ HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */
+ HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */
+ HCLGE_MAC_SPEED_10G = 10000, /* 10000 Mbps = 10 Gbps */
+ HCLGE_MAC_SPEED_25G = 25000, /* 25000 Mbps = 25 Gbps */
+ HCLGE_MAC_SPEED_40G = 40000, /* 40000 Mbps = 40 Gbps */
+ HCLGE_MAC_SPEED_50G = 50000, /* 50000 Mbps = 50 Gbps */
+ HCLGE_MAC_SPEED_100G = 100000, /* 100000 Mbps = 100 Gbps */
+ HCLGE_MAC_SPEED_200G = 200000 /* 200000 Mbps = 200 Gbps */
+};
+
+enum HCLGE_MAC_DUPLEX {
+ HCLGE_MAC_HALF,
+ HCLGE_MAC_FULL
+};
+
+#define QUERY_SFP_SPEED 0
+#define QUERY_ACTIVE_SPEED 1
+
+struct hclge_mac {
+ u8 mac_id;
+ u8 phy_addr;
+ u8 flag;
+ u8 media_type; /* port media type, e.g. fibre/copper/backplane */
+ u8 mac_addr[ETH_ALEN];
+ u8 autoneg;
+ u8 duplex;
+ u8 support_autoneg;
+ u8 speed_type; /* 0: sfp speed, 1: active speed */
+ u32 speed;
+ u32 max_speed;
+ u32 speed_ability; /* speed ability supported by current media */
+ u32 module_type; /* sub media type, e.g. kr/cr/sr/lr */
+ u32 fec_mode; /* active fec mode */
+ u32 user_fec_mode;
+ u32 fec_ability;
+ int link; /* store the link status of mac & phy (if phy exists) */
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ phy_interface_t phy_if;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(supported);
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising);
+};
+
+struct hclge_hw {
+ void __iomem *io_base;
+ struct hclge_mac mac;
+ int num_vec;
+ struct hclge_cmq cmq;
+};
+
+/* TQP stats */
+struct hlcge_tqp_stats {
+ /* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+ /* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclge_tqp {
+ /* copy of the device pointer from pci_dev,
+ * used when performing DMA mapping
+ */
+ struct device *dev;
+ struct hnae3_queue q;
+ struct hlcge_tqp_stats tqp_stats;
+ u16 index; /* Global index in a NIC controller */
+
+ bool alloced;
+};
+
+enum hclge_fc_mode {
+ HCLGE_FC_NONE,
+ HCLGE_FC_RX_PAUSE,
+ HCLGE_FC_TX_PAUSE,
+ HCLGE_FC_FULL,
+ HCLGE_FC_PFC,
+ HCLGE_FC_DEFAULT
+};
+
+enum hclge_link_fail_code {
+ HCLGE_LF_NORMAL,
+ HCLGE_LF_REF_CLOCK_LOST,
+ HCLGE_LF_XSFP_TX_DISABLE,
+ HCLGE_LF_XSFP_ABSENT,
+};
+
+#define HCLGE_LINK_STATUS_DOWN 0
+#define HCLGE_LINK_STATUS_UP 1
+
+#define HCLGE_PG_NUM 4
+#define HCLGE_SCH_MODE_SP 0
+#define HCLGE_SCH_MODE_DWRR 1
+struct hclge_pg_info {
+ u8 pg_id;
+ u8 pg_sch_mode; /* 0: sp; 1: dwrr */
+ u8 tc_bit_map;
+ u32 bw_limit;
+ u8 tc_dwrr[HNAE3_MAX_TC];
+};
+
+struct hclge_tc_info {
+ u8 tc_id;
+ u8 tc_sch_mode; /* 0: sp; 1: dwrr */
+ u8 pgid;
+ u32 bw_limit;
+};
+
+struct hclge_cfg {
+ u8 vmdq_vport_num;
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u16 rss_size_max;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u8 default_speed;
+ u32 numa_node_map;
+ u16 speed_ability;
+ u16 umv_space;
+};
+
+struct hclge_tm_info {
+ u8 num_tc;
+ u8 num_pg; /* must be 1 for vNET-based scheduling */
+ u8 pg_dwrr[HCLGE_PG_NUM];
+ u8 prio_tc[HNAE3_MAX_USER_PRIO];
+ struct hclge_pg_info pg_info[HCLGE_PG_NUM];
+ struct hclge_tc_info tc_info[HNAE3_MAX_TC];
+ enum hclge_fc_mode fc_mode;
+ u8 hw_pfc_map; /* Whether packet drop is allowed on this TC */
+ u8 pfc_en; /* PFC enabled or not for user priority */
+};
+
+struct hclge_comm_stats_str {
+ char desc[ETH_GSTRING_LEN];
+ unsigned long offset;
+};
+
+/* mac stats, opcode id: 0x0032 */
+struct hclge_mac_stats {
+ u64 mac_tx_mac_pause_num;
+ u64 mac_rx_mac_pause_num;
+ u64 mac_tx_pfc_pri0_pkt_num;
+ u64 mac_tx_pfc_pri1_pkt_num;
+ u64 mac_tx_pfc_pri2_pkt_num;
+ u64 mac_tx_pfc_pri3_pkt_num;
+ u64 mac_tx_pfc_pri4_pkt_num;
+ u64 mac_tx_pfc_pri5_pkt_num;
+ u64 mac_tx_pfc_pri6_pkt_num;
+ u64 mac_tx_pfc_pri7_pkt_num;
+ u64 mac_rx_pfc_pri0_pkt_num;
+ u64 mac_rx_pfc_pri1_pkt_num;
+ u64 mac_rx_pfc_pri2_pkt_num;
+ u64 mac_rx_pfc_pri3_pkt_num;
+ u64 mac_rx_pfc_pri4_pkt_num;
+ u64 mac_rx_pfc_pri5_pkt_num;
+ u64 mac_rx_pfc_pri6_pkt_num;
+ u64 mac_rx_pfc_pri7_pkt_num;
+ u64 mac_tx_total_pkt_num;
+ u64 mac_tx_total_oct_num;
+ u64 mac_tx_good_pkt_num;
+ u64 mac_tx_bad_pkt_num;
+ u64 mac_tx_good_oct_num;
+ u64 mac_tx_bad_oct_num;
+ u64 mac_tx_uni_pkt_num;
+ u64 mac_tx_multi_pkt_num;
+ u64 mac_tx_broad_pkt_num;
+ u64 mac_tx_undersize_pkt_num;
+ u64 mac_tx_oversize_pkt_num;
+ u64 mac_tx_64_oct_pkt_num;
+ u64 mac_tx_65_127_oct_pkt_num;
+ u64 mac_tx_128_255_oct_pkt_num;
+ u64 mac_tx_256_511_oct_pkt_num;
+ u64 mac_tx_512_1023_oct_pkt_num;
+ u64 mac_tx_1024_1518_oct_pkt_num;
+ u64 mac_tx_1519_2047_oct_pkt_num;
+ u64 mac_tx_2048_4095_oct_pkt_num;
+ u64 mac_tx_4096_8191_oct_pkt_num;
+ u64 rsv0;
+ u64 mac_tx_8192_9216_oct_pkt_num;
+ u64 mac_tx_9217_12287_oct_pkt_num;
+ u64 mac_tx_12288_16383_oct_pkt_num;
+ u64 mac_tx_1519_max_good_oct_pkt_num;
+ u64 mac_tx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_rx_total_pkt_num;
+ u64 mac_rx_total_oct_num;
+ u64 mac_rx_good_pkt_num;
+ u64 mac_rx_bad_pkt_num;
+ u64 mac_rx_good_oct_num;
+ u64 mac_rx_bad_oct_num;
+ u64 mac_rx_uni_pkt_num;
+ u64 mac_rx_multi_pkt_num;
+ u64 mac_rx_broad_pkt_num;
+ u64 mac_rx_undersize_pkt_num;
+ u64 mac_rx_oversize_pkt_num;
+ u64 mac_rx_64_oct_pkt_num;
+ u64 mac_rx_65_127_oct_pkt_num;
+ u64 mac_rx_128_255_oct_pkt_num;
+ u64 mac_rx_256_511_oct_pkt_num;
+ u64 mac_rx_512_1023_oct_pkt_num;
+ u64 mac_rx_1024_1518_oct_pkt_num;
+ u64 mac_rx_1519_2047_oct_pkt_num;
+ u64 mac_rx_2048_4095_oct_pkt_num;
+ u64 mac_rx_4096_8191_oct_pkt_num;
+ u64 rsv1;
+ u64 mac_rx_8192_9216_oct_pkt_num;
+ u64 mac_rx_9217_12287_oct_pkt_num;
+ u64 mac_rx_12288_16383_oct_pkt_num;
+ u64 mac_rx_1519_max_good_oct_pkt_num;
+ u64 mac_rx_1519_max_bad_oct_pkt_num;
+
+ u64 mac_tx_fragment_pkt_num;
+ u64 mac_tx_undermin_pkt_num;
+ u64 mac_tx_jabber_pkt_num;
+ u64 mac_tx_err_all_pkt_num;
+ u64 mac_tx_from_app_good_pkt_num;
+ u64 mac_tx_from_app_bad_pkt_num;
+ u64 mac_rx_fragment_pkt_num;
+ u64 mac_rx_undermin_pkt_num;
+ u64 mac_rx_jabber_pkt_num;
+ u64 mac_rx_fcs_err_pkt_num;
+ u64 mac_rx_send_app_good_pkt_num;
+ u64 mac_rx_send_app_bad_pkt_num;
+ u64 mac_tx_pfc_pause_pkt_num;
+ u64 mac_rx_pfc_pause_pkt_num;
+ u64 mac_tx_ctrl_pkt_num;
+ u64 mac_rx_ctrl_pkt_num;
+};
+
+#define HCLGE_STATS_TIMER_INTERVAL 300UL
+
+struct hclge_vlan_type_cfg {
+ u16 rx_ot_fst_vlan_type;
+ u16 rx_ot_sec_vlan_type;
+ u16 rx_in_fst_vlan_type;
+ u16 rx_in_sec_vlan_type;
+ u16 tx_ot_vlan_type;
+ u16 tx_in_vlan_type;
+};
+
+enum HCLGE_FD_MODE {
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_400B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_1K_WIDTH_400B_STAGE_2,
+ HCLGE_FD_MODE_DEPTH_4K_WIDTH_200B_STAGE_1,
+ HCLGE_FD_MODE_DEPTH_2K_WIDTH_200B_STAGE_2,
+};
+
+enum HCLGE_FD_KEY_TYPE {
+ HCLGE_FD_KEY_BASE_ON_PTYPE,
+ HCLGE_FD_KEY_BASE_ON_TUPLE,
+};
+
+enum HCLGE_FD_STAGE {
+ HCLGE_FD_STAGE_1,
+ HCLGE_FD_STAGE_2,
+ MAX_STAGE_NUM,
+};
+
+/* OUTER_XXX indicates tuples in the outer (tunnel) header of a tunnel packet.
+ * INNER_XXX indicates tuples in the inner header of a tunnel packet or the
+ * tuples of a non-tunnel packet.
+ */
+enum HCLGE_FD_TUPLE {
+ OUTER_DST_MAC,
+ OUTER_SRC_MAC,
+ OUTER_VLAN_TAG_FST,
+ OUTER_VLAN_TAG_SEC,
+ OUTER_ETH_TYPE,
+ OUTER_L2_RSV,
+ OUTER_IP_TOS,
+ OUTER_IP_PROTO,
+ OUTER_SRC_IP,
+ OUTER_DST_IP,
+ OUTER_L3_RSV,
+ OUTER_SRC_PORT,
+ OUTER_DST_PORT,
+ OUTER_L4_RSV,
+ OUTER_TUN_VNI,
+ OUTER_TUN_FLOW_ID,
+ INNER_DST_MAC,
+ INNER_SRC_MAC,
+ INNER_VLAN_TAG_FST,
+ INNER_VLAN_TAG_SEC,
+ INNER_ETH_TYPE,
+ INNER_L2_RSV,
+ INNER_IP_TOS,
+ INNER_IP_PROTO,
+ INNER_SRC_IP,
+ INNER_DST_IP,
+ INNER_L3_RSV,
+ INNER_SRC_PORT,
+ INNER_DST_PORT,
+ INNER_L4_RSV,
+ MAX_TUPLE,
+};
+
+enum HCLGE_FD_META_DATA {
+ PACKET_TYPE_ID,
+ IP_FRAGEMENT,
+ ROCE_TYPE,
+ NEXT_KEY,
+ VLAN_NUMBER,
+ SRC_VPORT,
+ DST_VPORT,
+ TUNNEL_PACKET,
+ MAX_META_DATA,
+};
+
+struct key_info {
+ u8 key_type;
+ u8 key_length; /* length in bits */
+};
+
+#define MAX_KEY_LENGTH 400
+#define MAX_KEY_DWORDS DIV_ROUND_UP(MAX_KEY_LENGTH / 8, 4)
+#define MAX_KEY_BYTES (MAX_KEY_DWORDS * 4)
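+/* with MAX_KEY_LENGTH = 400 bits, each key occupies 13 dwords (52 bytes) */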
+#define MAX_META_DATA_LENGTH 32
+
+/* assigned by firmware, the real filter number for each pf may be less */
+#define MAX_FD_FILTER_NUM 4096
+#define HCLGE_ARFS_EXPIRE_INTERVAL 5UL
+
+enum HCLGE_FD_ACTIVE_RULE_TYPE {
+ HCLGE_FD_RULE_NONE,
+ HCLGE_FD_ARFS_ACTIVE,
+ HCLGE_FD_EP_ACTIVE,
+};
+
+enum HCLGE_FD_PACKET_TYPE {
+ NIC_PACKET,
+ ROCE_PACKET,
+};
+
+enum HCLGE_FD_ACTION {
+ HCLGE_FD_ACTION_ACCEPT_PACKET,
+ HCLGE_FD_ACTION_DROP_PACKET,
+};
+
+struct hclge_fd_key_cfg {
+ u8 key_sel;
+ u8 inner_sipv6_word_en;
+ u8 inner_dipv6_word_en;
+ u8 outer_sipv6_word_en;
+ u8 outer_dipv6_word_en;
+ u32 tuple_active;
+ u32 meta_data_active;
+};
+
+struct hclge_fd_cfg {
+ u8 fd_mode;
+ u16 max_key_length; /* length in bits */
+ u32 rule_num[MAX_STAGE_NUM]; /* rule entry number */
+ u16 cnt_num[MAX_STAGE_NUM]; /* rule hit counter number */
+ struct hclge_fd_key_cfg key_cfg[MAX_STAGE_NUM];
+};
+
+#define IPV4_INDEX 3
+#define IPV6_SIZE 4
+struct hclge_fd_rule_tuples {
+ u8 src_mac[ETH_ALEN];
+ u8 dst_mac[ETH_ALEN];
+ /* Compatible with both IPv4 and IPv6 addresses.
+ * An IPv4 address is stored in src/dst_ip[3].
+ */
+ u32 src_ip[IPV6_SIZE];
+ u32 dst_ip[IPV6_SIZE];
+ u16 src_port;
+ u16 dst_port;
+ u16 vlan_tag1;
+ u16 ether_proto;
+ u8 ip_tos;
+ u8 ip_proto;
+};
+
+struct hclge_fd_rule {
+ struct hlist_node rule_node;
+ struct hclge_fd_rule_tuples tuples;
+ struct hclge_fd_rule_tuples tuples_mask;
+ u32 unused_tuple;
+ u32 flow_type;
+ u8 action;
+ u16 vf_id;
+ u16 queue_id;
+ u16 location;
+ u16 flow_id; /* only used for arfs */
+ enum HCLGE_FD_ACTIVE_RULE_TYPE rule_type;
+};
+
+struct hclge_fd_ad_data {
+ u16 ad_id;
+ u8 drop_packet;
+ u8 forward_to_direct_queue;
+ u16 queue_id;
+ u8 use_counter;
+ u8 counter_id;
+ u8 use_next_stage;
+ u8 write_rule_id_to_bd;
+ u8 next_input_key;
+ u16 rule_id;
+};
+
+enum HCLGE_MAC_NODE_STATE {
+ HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ACTIVE
+};
+
+struct hclge_mac_node {
+ struct list_head node;
+ enum HCLGE_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+enum HCLGE_MAC_ADDR_TYPE {
+ HCLGE_MAC_ADDR_UC,
+ HCLGE_MAC_ADDR_MC
+};
+
+struct hclge_vport_vlan_cfg {
+ struct list_head node;
+ int hd_tbl_status;
+ u16 vlan_id;
+};
+
+struct hclge_rst_stats {
+ u32 reset_done_cnt; /* the number of completed resets */
+ u32 hw_reset_done_cnt; /* the number of completed HW resets */
+ u32 pf_rst_cnt; /* the number of PF resets */
+ u32 flr_rst_cnt; /* the number of FLR resets */
+ u32 global_rst_cnt; /* the number of global resets */
+ u32 imp_rst_cnt; /* the number of IMP resets */
+ u32 reset_cnt; /* the total number of resets */
+ u32 reset_fail_cnt; /* the number of failed resets */
+};
+
+/* time and register status when a MAC tunnel interrupt occurs */
+struct hclge_mac_tnl_stats {
+ u64 time;
+ u32 status;
+};
+
+#define HCLGE_RESET_INTERVAL (10 * HZ)
+#define HCLGE_WAIT_RESET_DONE 100
+
+#pragma pack(1)
+struct hclge_vf_vlan_cfg {
+ u8 mbx_cmd;
+ u8 subcode;
+ u8 is_kill;
+ u16 vlan;
+ u16 proto;
+};
+
+#pragma pack()
+
+/* For each bit of TCAM entry, it uses a pair of 'x' and
+ * 'y' to indicate which value to match, like below:
+ * ----------------------------------
+ * | bit x | bit y | search value |
+ * ----------------------------------
+ * | 0 | 0 | always hit |
+ * ----------------------------------
+ * | 1 | 0 | match '0' |
+ * ----------------------------------
+ * | 0 | 1 | match '1' |
+ * ----------------------------------
+ * | 1 | 1 | invalid |
+ * ----------------------------------
+ * Then for input key(k) and mask(v), we can calculate the value by
+ * the formulae:
+ * x = (~k) & v
+ * y = (k ^ ~v) & k
+ */
+#define calc_x(x, k, v) ((x) = (~(k) & (v)))
+#define calc_y(y, k, v) \
+ do { \
+ const typeof(k) _k_ = (k); \
+ const typeof(v) _v_ = (v); \
+ (y) = (_k_ ^ ~_v_) & (_k_); \
+ } while (0)
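+/* For example, to match a single key byte k = 0x5A exactly, use mask
+ * v = 0xff: calc_x() gives x = ~0x5A & 0xff = 0xa5 and calc_y() gives
+ * y = (0x5A ^ ~0xff) & 0x5A = 0x5A, so each '1' bit of the key becomes
+ * the (x = 0, y = 1) "match '1'" pair and each '0' bit becomes the
+ * (x = 1, y = 0) "match '0'" pair. With v = 0 (don't care), both x and
+ * y are 0, i.e. "always hit".
+ */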
+
+#define HCLGE_MAC_TNL_LOG_SIZE 8
+#define HCLGE_VPORT_NUM 256
+struct hclge_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclge_hw hw;
+ struct hclge_misc_vector misc_vector;
+ struct hclge_mac_stats mac_stats;
+ unsigned long state;
+ unsigned long flr_state;
+ unsigned long last_reset_time;
+
+ enum hnae3_reset_type reset_type;
+ enum hnae3_reset_type reset_level;
+ unsigned long default_reset_request;
+ unsigned long reset_request; /* reset has been requested */
+ unsigned long reset_pending; /* client rst is pending to be served */
+ struct hclge_rst_stats rst_stats;
+ struct semaphore reset_sem; /* protect reset process */
+ u32 fw_version;
+ u16 num_vmdq_vport; /* Num vmdq vport this PF has set up */
+ u16 num_tqps; /* Num task queue pairs of this PF */
+ u16 num_req_vfs; /* Num VFs requested for this PF */
+
+ u16 base_tqp_pid; /* Base task tqp physical id of this PF */
+ u16 alloc_rss_size; /* Allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 fdir_pf_filter_count; /* Num of guaranteed filters for this PF */
+ u16 num_alloc_vport; /* Num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_tx_desc; /* desc num per tx queue */
+ u16 num_rx_desc; /* desc num per rx queue */
+ u8 hw_tc_map;
+ enum hclge_fc_mode fc_mode_last_time;
+ u8 support_sfp_query;
+
+#define HCLGE_FLAG_TC_BASE_SCH_MODE 1
+#define HCLGE_FLAG_VNET_BASE_SCH_MODE 2
+ u8 tx_sch_mode;
+ u8 tc_max;
+ u8 pfc_max;
+
+ u8 default_up;
+ u8 dcbx_cap;
+ struct hclge_tm_info tm_info;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u16 roce_base_msix_offset;
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
+ u16 num_nic_msi; /* Num of nic vectors for this PF */
+ u16 num_roce_msi; /* Num of roce vectors for this PF */
+ int roce_base_vector;
+
+ unsigned long service_timer_period;
+ unsigned long service_timer_previous;
+ struct timer_list reset_timer;
+ struct delayed_work service_task;
+
+ bool cur_promisc;
+ int num_alloc_vfs; /* Actual number of VFs allocated */
+
+ struct hclge_tqp *htqp;
+ struct hclge_vport *vport;
+
+ struct dentry *hclge_dbgfs;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+
+#define HCLGE_FLAG_MAIN BIT(0)
+#define HCLGE_FLAG_DCB_CAPABLE BIT(1)
+#define HCLGE_FLAG_DCB_ENABLE BIT(2)
+#define HCLGE_FLAG_MQPRIO_ENABLE BIT(3)
+ u32 flag;
+
+ u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+ u32 tx_buf_size; /* Tx buffer size for each TC */
+ u32 dv_buf_size; /* Dv buffer size for each TC */
+
+ u32 mps; /* Max packet size */
+ /* vport_lock protects resources shared by vports */
+ struct mutex vport_lock;
+
+ struct hclge_vlan_type_cfg vlan_type_cfg;
+
+ unsigned long vlan_table[VLAN_N_VID][BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+ unsigned long vf_vlan_full[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ unsigned long vport_config_block[BITS_TO_LONGS(HCLGE_VPORT_NUM)];
+
+ struct hclge_fd_cfg fd_cfg;
+ struct hlist_head fd_rule_list;
+ spinlock_t fd_rule_lock; /* protect fd_rule_list and fd_bmap */
+ u16 hclge_fd_rule_num;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
+ unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)];
+ enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type;
+ u8 fd_en;
+
+ u16 wanted_umv_size;
+ /* max available unicast mac vlan space */
+ u16 max_umv_size;
+ /* private unicast mac vlan space, the same for the PF and its VFs */
+ u16 priv_umv_size;
+ /* unicast mac vlan space shared by PF and its VFs */
+ u16 share_umv_size;
+
+ DECLARE_KFIFO(mac_tnl_log, struct hclge_mac_tnl_stats,
+ HCLGE_MAC_TNL_LOG_SIZE);
+
+ /* affinity mask and notify for misc interrupt */
+ cpumask_t affinity_mask;
+ struct irq_affinity_notify affinity_notify;
+};
+
+/* VPort level vlan tag configuration for TX direction */
+struct hclge_tx_vtag_cfg {
+ bool accept_tag1; /* Whether to accept tag1 packets from the host */
+ bool accept_untag1; /* Whether to accept untag1 packets from the host */
+ bool accept_tag2;
+ bool accept_untag2;
+ bool insert_tag1_en; /* Whether to insert the inner vlan tag */
+ bool insert_tag2_en; /* Whether to insert the outer vlan tag */
+ u16 default_tag1; /* The default inner vlan tag to insert */
+ u16 default_tag2; /* The default outer vlan tag to insert */
+};
+
+/* VPort level vlan tag configuration for RX direction */
+struct hclge_rx_vtag_cfg {
+ u8 rx_vlan_offload_en; /* Whether to enable rx vlan offload */
+ u8 strip_tag1_en; /* Whether to strip the inner vlan tag */
+ u8 strip_tag2_en; /* Whether to strip the outer vlan tag */
+ u8 vlan1_vlan_prionly; /* Inner VLAN Tag up to descriptor Enable */
+ u8 vlan2_vlan_prionly; /* Outer VLAN Tag up to descriptor Enable */
+};
+
+struct hclge_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
+enum HCLGE_VPORT_STATE {
+ HCLGE_VPORT_STATE_ALIVE,
+ HCLGE_VPORT_STATE_MAC_TBL_CHANGE,
+ HCLGE_VPORT_STATE_MAX
+};
+
+struct hclge_vlan_info {
+ u16 vlan_proto; /* so far support 802.1Q only */
+ u16 qos;
+ u16 vlan_tag;
+};
+
+struct hclge_port_base_vlan_config {
+ u16 state;
+ struct hclge_vlan_info vlan_info;
+};
+
+struct hclge_vf_info {
+ int link_state;
+ u8 mac[ETH_ALEN];
+ u32 spoofchk;
+ u32 max_tx_rate;
+ u32 trusted;
+ u16 promisc_enable;
+};
+
+struct hclge_vport {
+ u16 alloc_tqps; /* Allocated Tx/Rx queues */
+
+ u8 rss_hash_key[HCLGE_RSS_KEY_SIZE]; /* User configured hash keys */
+ /* User configured lookup table entries */
+ u8 rss_indirection_tbl[HCLGE_RSS_IND_TBL_SIZE];
+ int rss_algo; /* User configured hash algorithm */
+ /* User configured rss tuple sets */
+ struct hclge_rss_tuple_cfg rss_tuple_sets;
+
+ u16 alloc_rss_size;
+
+ u16 qs_offset;
+ u32 bw_limit; /* VSI BW Limit (0 = disabled) */
+ u8 dwrr;
+
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+ struct hclge_port_base_vlan_config port_base_vlan_cfg;
+ struct hclge_tx_vtag_cfg txvlan_cfg;
+ struct hclge_rx_vtag_cfg rxvlan_cfg;
+
+ u16 used_umv_num;
+
+ u16 vport_id;
+ struct hclge_dev *back; /* Back reference to associated dev */
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ unsigned long state;
+ unsigned long last_active_jiffies;
+ u32 mps; /* Max packet size */
+ struct hclge_vf_info vf_info;
+
+ u8 overflow_promisc_flags;
+ u8 last_promisc_flags;
+
+ spinlock_t mac_list_lock; /* protects the mac address lists to add/delete */
+ struct list_head uc_mac_list; /* Store VF unicast table */
+ struct list_head mc_mac_list; /* Store VF multicast table */
+ struct list_head vlan_list; /* Store VF vlan table */
+};
+
+int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc,
+ bool en_mc_pmc, bool en_bc_pmc);
+int hclge_add_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+int hclge_rm_uc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+int hclge_add_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+int hclge_rm_mc_addr_common(struct hclge_vport *vport,
+ const unsigned char *addr);
+
+struct hclge_vport *hclge_get_vport(struct hnae3_handle *handle);
+int hclge_bind_ring_with_vector(struct hclge_vport *vport,
+ int vector_id, bool en,
+ struct hnae3_ring_chain_node *ring_chain);
+
+static inline int hclge_get_queue_id(struct hnae3_queue *queue)
+{
+ struct hclge_tqp *tqp = container_of(queue, struct hclge_tqp, q);
+
+ return tqp->index;
+}
+
+static inline bool hclge_is_reset_pending(struct hclge_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport);
+int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex);
+int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
+ u16 vlan_id, bool is_kill);
+int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable);
+
+int hclge_buffer_alloc(struct hclge_dev *hdev);
+int hclge_rss_init_hw(struct hclge_dev *hdev);
+void hclge_rss_indir_init_cfg(struct hclge_dev *hdev);
+
+void hclge_mbx_handler(struct hclge_dev *hdev);
+int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id);
+void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id);
+int hclge_cfg_flowctrl(struct hclge_dev *hdev);
+int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id);
+int hclge_vport_start(struct hclge_vport *vport);
+void hclge_vport_stop(struct hclge_vport *vport);
+int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu);
+int hclge_dbg_run_cmd(struct hnae3_handle *handle, const char *cmd_buf);
+u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id);
+int hclge_notify_client(struct hclge_dev *hdev,
+ enum hnae3_reset_notify_type type);
+int hclge_update_mac_list(struct hclge_vport *vport,
+ enum HCLGE_MAC_NODE_STATE state,
+ enum HCLGE_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr);
+int hclge_update_mac_node_for_dev_addr(struct hclge_vport *vport,
+ const u8 *old_addr, const u8 *new_addr);
+void hclge_rm_vport_all_mac_table(struct hclge_vport *vport, bool is_del_list,
+ enum HCLGE_MAC_ADDR_TYPE mac_type);
+void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list);
+void hclge_uninit_vport_vlan_table(struct hclge_dev *hdev);
+void hclge_restore_mac_table_common(struct hclge_vport *vport);
+void hclge_restore_vport_vlan_table(struct hclge_vport *vport);
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+ struct hclge_vlan_info *vlan_info);
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state, u16 vlan_tag, u16 qos,
+ u16 vlan_proto);
+void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time);
+int hclge_query_bd_num_cmd_send(struct hclge_dev *hdev,
+ struct hclge_desc *desc);
+void hclge_report_hw_error(struct hclge_dev *hdev,
+ enum hnae3_hw_error_type type);
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport);
+void hclge_dbg_dump_rst_info(struct hclge_dev *hdev);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
new file mode 100644
index 000000000..51b7b46f2
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c
@@ -0,0 +1,867 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define CREATE_TRACE_POINTS
+#include "hclge_trace.h"
+
+static u16 hclge_errno_to_resp(int errno)
+{
+ int resp = abs(errno);
+
+ /* The status for the pf-to-vf msg cmd is u16, constrained by HW.
+ * We need to keep the same type as it.
+ * The input errno is a standard error code, so it is safe to
+ * use a u16 to store abs(errno).
+ */
+ return (u16)resp;
+}
+
+/* hclge_gen_resp_to_vf: used to generate a synchronous response to the VF
+ * when the PF receives a mailbox message from the VF.
+ * @vport: pointer to struct hclge_vport
+ * @vf_to_pf_req: pointer to hclge_mbx_vf_to_pf_cmd of the original mailbox
+ * message
+ * @resp_msg: the response status (0 on success) and optional data to send
+ * back to the VF
+ */
+static int hclge_gen_resp_to_vf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *vf_to_pf_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+ u16 resp;
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ if (resp_msg->len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "PF fail to gen resp to VF len %u exceeds max len %u\n",
+ resp_msg->len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ /* If resp_msg->len is too long, set the value to max length
+ * and return the msg to VF
+ */
+ resp_msg->len = HCLGE_MBX_MAX_RESP_DATA_SIZE;
+ }
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid;
+ resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len;
+ resp_pf_to_vf->match_id = vf_to_pf_req->match_id;
+
+ resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP;
+ resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code;
+ resp_pf_to_vf->msg.vf_mbx_msg_subcode = vf_to_pf_req->msg.subcode;
+ resp = hclge_errno_to_resp(resp_msg->status);
+ if (resp < SHRT_MAX) {
+ resp_pf_to_vf->msg.resp_status = resp;
+ } else {
+ dev_warn(&hdev->pdev->dev,
+ "failed to send response to VF, response status %d is out-of-bound\n",
+ resp);
+ resp_pf_to_vf->msg.resp_status = EIO;
+ }
+
+ if (resp_msg->len > 0)
+ memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data,
+ resp_msg->len);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "failed to send response to VF, status: %d, vfid: %u, code: %u, subcode: %u.\n",
+ status, vf_to_pf_req->mbx_src_vfid,
+ vf_to_pf_req->msg.code, vf_to_pf_req->msg.subcode);
+
+ return status;
+}
+
+static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len,
+ u16 mbx_opcode, u8 dest_vfid)
+{
+ struct hclge_mbx_pf_to_vf_cmd *resp_pf_to_vf;
+ struct hclge_dev *hdev = vport->back;
+ enum hclge_cmd_status status;
+ struct hclge_desc desc;
+
+ if (msg_len > HCLGE_MBX_MAX_MSG_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "msg data length(=%u) exceeds maximum(=%u)\n",
+ msg_len, HCLGE_MBX_MAX_MSG_SIZE);
+ return -EMSGSIZE;
+ }
+
+ resp_pf_to_vf = (struct hclge_mbx_pf_to_vf_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_PF_TO_VF, false);
+
+ resp_pf_to_vf->dest_vfid = dest_vfid;
+ resp_pf_to_vf->msg_len = msg_len;
+ resp_pf_to_vf->msg.code = mbx_opcode;
+
+ memcpy(&resp_pf_to_vf->msg.vf_mbx_msg_code, msg, msg_len);
+
+ trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf);
+
+ status = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "failed to send mailbox to VF, status: %d, vfid: %u, opcode: %u\n",
+ status, dest_vfid, mbx_opcode);
+
+ return status;
+}
+
+int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ u16 reset_type;
+ u8 msg_data[2];
+ u8 dest_vfid;
+
+ BUILD_BUG_ON(HNAE3_MAX_RESET > U16_MAX);
+
+ dest_vfid = (u8)vport->vport_id;
+
+ if (hdev->reset_type == HNAE3_FUNC_RESET)
+ reset_type = HNAE3_VF_PF_FUNC_RESET;
+ else if (hdev->reset_type == HNAE3_FLR_RESET)
+ reset_type = HNAE3_VF_FULL_RESET;
+ else
+ reset_type = HNAE3_VF_FUNC_RESET;
+
+ memcpy(&msg_data[0], &reset_type, sizeof(u16));
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_ASSERTING_RESET, dest_vfid);
+}
+
+static void hclge_free_vector_ring_chain(struct hnae3_ring_chain_node *head)
+{
+ struct hnae3_ring_chain_node *chain_tmp, *chain;
+
+ chain = head->next;
+
+ while (chain) {
+ chain_tmp = chain->next;
+ kfree_sensitive(chain);
+ chain = chain_tmp;
+ }
+}
+
+/* hclge_get_ring_chain_from_mbx: get ring type & tqp id & int_gl idx
+ * from mailbox message
+ * msg[0]: opcode
+ * msg[1]: <not relevant to this function>
+ * msg[2]: ring_num
+ * msg[3]: first ring type (TX|RX)
+ * msg[4]: first tqp id
+ * msg[5]: first int_gl idx
+ * msg[6] ~ msg[14]: other ring type, tqp id and int_gl idx
+ */
+static int hclge_get_ring_chain_from_mbx(
+ struct hclge_mbx_vf_to_pf_cmd *req,
+ struct hnae3_ring_chain_node *ring_chain,
+ struct hclge_vport *vport)
+{
+ struct hnae3_ring_chain_node *cur_chain, *new_chain;
+ struct hclge_dev *hdev = vport->back;
+ int ring_num;
+ int i;
+
+ ring_num = req->msg.ring_num;
+
+ if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM)
+ return -EINVAL;
+
+ for (i = 0; i < ring_num; i++) {
+ if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) {
+ dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n",
+ req->msg.param[i].tqp_index,
+ vport->nic.kinfo.rss_size - 1);
+ return -EINVAL;
+ }
+ }
+
+ hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[0].ring_type);
+ ring_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[0].tqp_index]);
+ hnae3_set_field(ring_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index);
+
+ cur_chain = ring_chain;
+
+ for (i = 1; i < ring_num; i++) {
+ new_chain = kzalloc(sizeof(*new_chain), GFP_KERNEL);
+ if (!new_chain)
+ goto err;
+
+ hnae3_set_bit(new_chain->flag, HNAE3_RING_TYPE_B,
+ req->msg.param[i].ring_type);
+
+ new_chain->tqp_index =
+ hclge_get_queue_id(vport->nic.kinfo.tqp
+ [req->msg.param[i].tqp_index]);
+
+ hnae3_set_field(new_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S,
+ req->msg.param[i].int_gl_index);
+
+ cur_chain->next = new_chain;
+ cur_chain = new_chain;
+ }
+
+ return 0;
+err:
+ hclge_free_vector_ring_chain(ring_chain);
+ return -ENOMEM;
+}
+
+static int hclge_map_unmap_ring_to_vf_vector(struct hclge_vport *vport, bool en,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ struct hnae3_ring_chain_node ring_chain;
+ int vector_id = req->msg.vector_id;
+ int ret;
+
+ memset(&ring_chain, 0, sizeof(ring_chain));
+ ret = hclge_get_ring_chain_from_mbx(req, &ring_chain, vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_bind_ring_with_vector(vport, vector_id, en, &ring_chain);
+
+ hclge_free_vector_ring_chain(&ring_chain);
+
+ return ret;
+}
+
+static int hclge_set_vf_promisc_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ bool en_bc = req->msg.en_bc ? true : false;
+ bool en_uc = req->msg.en_uc ? true : false;
+ bool en_mc = req->msg.en_mc ? true : false;
+ int ret;
+
+ if (!vport->vf_info.trusted) {
+ en_uc = false;
+ en_mc = false;
+ }
+
+ ret = hclge_set_vport_promisc_mode(vport, en_uc, en_mc, en_bc);
+
+ vport->vf_info.promisc_enable = (en_uc || en_mc) ? 1 : 0;
+
+ return ret;
+}
+
+void hclge_inform_vf_promisc_info(struct hclge_vport *vport)
+{
+ u8 dest_vfid = (u8)vport->vport_id;
+ u8 msg_data[2];
+
+ memcpy(&msg_data[0], &vport->vf_info.promisc_enable, sizeof(u16));
+
+ hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_PUSH_PROMISC_INFO, dest_vfid);
+}
+
+static int hclge_set_vf_uc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET 6
+
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
+ struct hclge_dev *hdev = vport->back;
+ int status;
+
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_MODIFY) {
+ const u8 *old_addr = (const u8 *)
+ (&mbx_req->msg.data[HCLGE_MBX_VF_OLD_MAC_ADDR_OFFSET]);
+
+ /* If VF MAC has been configured by the host then it
+ * cannot be overridden by the MAC specified by the VM.
+ */
+ if (!is_zero_ether_addr(vport->vf_info.mac) &&
+ !ether_addr_equal(mac_addr, vport->vf_info.mac))
+ return -EPERM;
+
+ if (!is_valid_ether_addr(mac_addr))
+ return -EINVAL;
+
+ spin_lock_bh(&vport->mac_list_lock);
+ status = hclge_update_mac_node_for_dev_addr(vport, old_addr,
+ mac_addr);
+ spin_unlock_bh(&vport->mac_list_lock);
+ hclge_task_schedule(hdev, 0);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_ADD) {
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_UC, mac_addr);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_UC_REMOVE) {
+ status = hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_UC, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set unicast mac addr, unknown subcode %u\n",
+ mbx_req->msg.subcode);
+ return -EIO;
+ }
+
+ return status;
+}
+
+static int hclge_set_vf_mc_mac_addr(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ const u8 *mac_addr = (const u8 *)(mbx_req->msg.data);
+ struct hclge_dev *hdev = vport->back;
+
+ if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_ADD) {
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_ADD,
+ HCLGE_MAC_ADDR_MC, mac_addr);
+ } else if (mbx_req->msg.subcode == HCLGE_MBX_MAC_VLAN_MC_REMOVE) {
+ hclge_update_mac_list(vport, HCLGE_MAC_TO_DEL,
+ HCLGE_MAC_ADDR_MC, mac_addr);
+ } else {
+ dev_err(&hdev->pdev->dev,
+ "failed to set mcast mac addr, unknown subcode %u\n",
+ mbx_req->msg.subcode);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+int hclge_push_vf_port_base_vlan_info(struct hclge_vport *vport, u8 vfid,
+ u16 state, u16 vlan_tag, u16 qos,
+ u16 vlan_proto)
+{
+#define MSG_DATA_SIZE 8
+
+ u8 msg_data[MSG_DATA_SIZE];
+
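+ /* msg layout: bytes 0-1 state, 2-3 vlan_proto, 4-5 qos, 6-7 vlan_tag */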
+ memcpy(&msg_data[0], &state, sizeof(u16));
+ memcpy(&msg_data[2], &vlan_proto, sizeof(u16));
+ memcpy(&msg_data[4], &qos, sizeof(u16));
+ memcpy(&msg_data[6], &vlan_tag, sizeof(u16));
+
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_PUSH_VLAN_INFO, vfid);
+}
+
+static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_MBX_VLAN_STATE_OFFSET 0
+#define HCLGE_MBX_VLAN_INFO_OFFSET 2
+
+ struct hclge_vf_vlan_cfg *msg_cmd;
+ int status = 0;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VLAN_FILTER) {
+ struct hnae3_handle *handle = &vport->nic;
+ u16 vlan, proto;
+ bool is_kill;
+
+ is_kill = !!msg_cmd->is_kill;
+ vlan = msg_cmd->vlan;
+ proto = msg_cmd->proto;
+ status = hclge_set_vlan_filter(handle, cpu_to_be16(proto),
+ vlan, is_kill);
+ } else if (msg_cmd->subcode == HCLGE_MBX_VLAN_RX_OFF_CFG) {
+ struct hnae3_handle *handle = &vport->nic;
+ bool en = msg_cmd->is_kill ? true : false;
+
+ status = hclge_en_hw_strip_rxvtag(handle, en);
+ } else if (msg_cmd->subcode == HCLGE_MBX_PORT_BASE_VLAN_CFG) {
+ struct hclge_vlan_info *vlan_info;
+ u16 *state;
+
+ state = (u16 *)&mbx_req->msg.data[HCLGE_MBX_VLAN_STATE_OFFSET];
+ vlan_info = (struct hclge_vlan_info *)
+ &mbx_req->msg.data[HCLGE_MBX_VLAN_INFO_OFFSET];
+ status = hclge_update_port_base_vlan_cfg(vport, *state,
+ vlan_info);
+ } else if (msg_cmd->subcode == HCLGE_MBX_GET_PORT_BASE_VLAN_STATE) {
+ resp_msg->data[0] = vport->port_base_vlan_cfg.state;
+ resp_msg->len = sizeof(u8);
+ }
+
+ return status;
+}
+
+static int hclge_set_vf_alive(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ bool alive = !!mbx_req->msg.data[0];
+ int ret = 0;
+
+ if (alive)
+ ret = hclge_vport_start(vport);
+ else
+ hclge_vport_stop(vport);
+
+ return ret;
+}
+
+static void hclge_get_vf_tcinfo(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ unsigned int i;
+
+ for (i = 0; i < kinfo->num_tc; i++)
+ resp_msg->data[0] |= BIT(i);
+
+ resp_msg->len = sizeof(u8);
+}
+
+static void hclge_get_vf_queue_info(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_TQPS_RSS_INFO_LEN 6
+#define HCLGE_TQPS_ALLOC_OFFSET 0
+#define HCLGE_TQPS_RSS_SIZE_OFFSET 2
+#define HCLGE_TQPS_RX_BUFFER_LEN_OFFSET 4
+
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue related info */
+ memcpy(&resp_msg->data[HCLGE_TQPS_ALLOC_OFFSET],
+ &vport->alloc_tqps, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_RSS_SIZE_OFFSET],
+ &vport->nic.kinfo.rss_size, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_RX_BUFFER_LEN_OFFSET],
+ &hdev->rx_buf_len, sizeof(u16));
+ resp_msg->len = HCLGE_TQPS_RSS_INFO_LEN;
+}
+
+static void hclge_get_vf_mac_addr(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ ether_addr_copy(resp_msg->data, vport->vf_info.mac);
+ resp_msg->len = ETH_ALEN;
+}
+
+static void hclge_get_vf_queue_depth(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_TQPS_DEPTH_INFO_LEN 4
+#define HCLGE_TQPS_NUM_TX_DESC_OFFSET 0
+#define HCLGE_TQPS_NUM_RX_DESC_OFFSET 2
+
+ struct hclge_dev *hdev = vport->back;
+
+ /* get the queue depth info */
+ memcpy(&resp_msg->data[HCLGE_TQPS_NUM_TX_DESC_OFFSET],
+ &hdev->num_tx_desc, sizeof(u16));
+ memcpy(&resp_msg->data[HCLGE_TQPS_NUM_RX_DESC_OFFSET],
+ &hdev->num_rx_desc, sizeof(u16));
+ resp_msg->len = HCLGE_TQPS_DEPTH_INFO_LEN;
+}
+
+static void hclge_get_vf_media_type(struct hclge_vport *vport,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_VF_MEDIA_TYPE_OFFSET 0
+#define HCLGE_VF_MODULE_TYPE_OFFSET 1
+#define HCLGE_VF_MEDIA_TYPE_LENGTH 2
+
+ struct hclge_dev *hdev = vport->back;
+
+ resp_msg->data[HCLGE_VF_MEDIA_TYPE_OFFSET] =
+ hdev->hw.mac.media_type;
+ resp_msg->data[HCLGE_VF_MODULE_TYPE_OFFSET] =
+ hdev->hw.mac.module_type;
+ resp_msg->len = HCLGE_VF_MEDIA_TYPE_LENGTH;
+}
+
+static int hclge_get_link_info(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_VF_LINK_STATE_UP 1U
+#define HCLGE_VF_LINK_STATE_DOWN 0U
+
+ struct hclge_dev *hdev = vport->back;
+ u16 link_status;
+ u8 msg_data[8];
+ u8 dest_vfid;
+ u16 duplex;
+
+ /* mac.link can only be 0 or 1 */
+ switch (vport->vf_info.link_state) {
+ case IFLA_VF_LINK_STATE_ENABLE:
+ link_status = HCLGE_VF_LINK_STATE_UP;
+ break;
+ case IFLA_VF_LINK_STATE_DISABLE:
+ link_status = HCLGE_VF_LINK_STATE_DOWN;
+ break;
+ case IFLA_VF_LINK_STATE_AUTO:
+ default:
+ link_status = (u16)hdev->hw.mac.link;
+ break;
+ }
+
+ duplex = hdev->hw.mac.duplex;
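+ /* msg layout: bytes 0-1 link status, bytes 2-5 speed, bytes 6-7 duplex */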
+ memcpy(&msg_data[0], &link_status, sizeof(u16));
+ memcpy(&msg_data[2], &hdev->hw.mac.speed, sizeof(u32));
+ memcpy(&msg_data[6], &duplex, sizeof(u16));
+ dest_vfid = mbx_req->mbx_src_vfid;
+
+ /* send this requested info to VF */
+ return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_LINK_STAT_CHANGE, dest_vfid);
+}
+
+static void hclge_get_link_mode(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+#define HCLGE_SUPPORTED 1
+ struct hclge_dev *hdev = vport->back;
+ unsigned long advertising;
+ unsigned long supported;
+ unsigned long send_data;
+ u8 msg_data[10] = {};
+ u8 dest_vfid;
+
+ advertising = hdev->hw.mac.advertising[0];
+ supported = hdev->hw.mac.supported[0];
+ dest_vfid = mbx_req->mbx_src_vfid;
+ msg_data[0] = mbx_req->msg.data[0];
+
+ send_data = msg_data[0] == HCLGE_SUPPORTED ? supported : advertising;
+
+ memcpy(&msg_data[2], &send_data, sizeof(unsigned long));
+ hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data),
+ HCLGE_MBX_LINK_STAT_MODE, dest_vfid);
+}
+
+static void hclge_mbx_reset_vf_queue(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u16 queue_id;
+
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+
+ hclge_reset_vf_queue(vport, queue_id);
+}
+
+static int hclge_reset_vf(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+
+ dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %u!",
+ vport->vport_id);
+
+ return hclge_func_reset_cmd(hdev, vport->vport_id);
+}
+
+static void hclge_vf_keep_alive(struct hclge_vport *vport)
+{
+ vport->last_active_jiffies = jiffies;
+}
+
+static int hclge_set_vf_mtu(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ u32 mtu;
+
+ memcpy(&mtu, mbx_req->msg.data, sizeof(mtu));
+
+ return hclge_set_vport_mtu(vport, mtu);
+}
+
+static int hclge_get_queue_id_in_pf(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+ struct hnae3_handle *handle = &vport->nic;
+ struct hclge_dev *hdev = vport->back;
+ u16 queue_id, qid_in_pf;
+
+ memcpy(&queue_id, mbx_req->msg.data, sizeof(queue_id));
+ if (queue_id >= handle->kinfo.num_tqps) {
+ dev_err(&hdev->pdev->dev, "Invalid queue id(%u) from VF %u\n",
+ queue_id, mbx_req->mbx_src_vfid);
+ return -EINVAL;
+ }
+
+ qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id);
+ memcpy(resp_msg->data, &qid_in_pf, sizeof(qid_in_pf));
+ resp_msg->len = sizeof(qid_in_pf);
+ return 0;
+}
+
+static int hclge_get_rss_key(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req,
+ struct hclge_respond_to_vf_msg *resp_msg)
+{
+#define HCLGE_RSS_MBX_RESP_LEN 8
+ struct hclge_dev *hdev = vport->back;
+ u8 index;
+
+ index = mbx_req->msg.data[0];
+
+ /* Check the query index of rss_hash_key from the VF to make sure
+ * it does not exceed the size of rss_hash_key.
+ */
+ if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) >
+ sizeof(vport[0].rss_hash_key)) {
+ dev_warn(&hdev->pdev->dev,
+ "failed to get the rss hash key, the index(%u) invalid !\n",
+ index);
+ return -EINVAL;
+ }
+
+ memcpy(resp_msg->data,
+ &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN],
+ HCLGE_RSS_MBX_RESP_LEN);
+ resp_msg->len = HCLGE_RSS_MBX_RESP_LEN;
+ return 0;
+}
+
+static void hclge_link_fail_parse(struct hclge_dev *hdev, u8 link_fail_code)
+{
+ switch (link_fail_code) {
+ case HCLGE_LF_REF_CLOCK_LOST:
+ dev_warn(&hdev->pdev->dev, "Reference clock lost!\n");
+ break;
+ case HCLGE_LF_XSFP_TX_DISABLE:
+ dev_warn(&hdev->pdev->dev, "SFP tx is disabled!\n");
+ break;
+ case HCLGE_LF_XSFP_ABSENT:
+ dev_warn(&hdev->pdev->dev, "SFP is absent!\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void hclge_handle_link_change_event(struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req)
+{
+ hclge_task_schedule(hdev, 0);
+
+ if (!req->msg.subcode)
+ hclge_link_fail_parse(hdev, req->msg.data[0]);
+}
+
+static bool hclge_cmd_crq_empty(struct hclge_hw *hw)
+{
+ u32 tail = hclge_read_dev(hw, HCLGE_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->cmq.crq.next_to_use;
+}
+
+static void hclge_handle_ncsi_error(struct hclge_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = hdev->ae_dev;
+
+ ae_dev->ops->set_default_reset_request(ae_dev, HNAE3_GLOBAL_RESET);
+ dev_warn(&hdev->pdev->dev, "requesting reset due to NCSI error\n");
+ ae_dev->ops->reset_event(hdev->pdev, NULL);
+}
+
+static void hclge_handle_vf_tbl(struct hclge_vport *vport,
+ struct hclge_mbx_vf_to_pf_cmd *mbx_req)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_vf_vlan_cfg *msg_cmd;
+
+ msg_cmd = (struct hclge_vf_vlan_cfg *)&mbx_req->msg;
+ if (msg_cmd->subcode == HCLGE_MBX_VPORT_LIST_CLEAR) {
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, true, HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, true);
+ } else {
+ dev_warn(&hdev->pdev->dev, "Invalid cmd(%u)\n",
+ msg_cmd->subcode);
+ }
+}
+
+void hclge_mbx_handler(struct hclge_dev *hdev)
+{
+ struct hclge_cmq_ring *crq = &hdev->hw.cmq.crq;
+ struct hclge_respond_to_vf_msg resp_msg;
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclge_vport *vport;
+ struct hclge_desc *desc;
+ bool is_del = false;
+ unsigned int flag;
+ int ret = 0;
+
+ /* handle all the mailbox requests in the queue */
+ while (!hclge_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "command queue needs re-initializing\n");
+ return;
+ }
+
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc->data;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae3_get_bit(flag, HCLGE_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %u\n",
+ req->msg.code);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
+ vport = &hdev->vport[req->mbx_src_vfid];
+
+ trace_hclge_pf_mbx_get(hdev, req);
+
+ /* clear the resp_msg before processing every mailbox message */
+ memset(&resp_msg, 0, sizeof(resp_msg));
+
+ switch (req->msg.code) {
+ case HCLGE_MBX_MAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, true,
+ req);
+ break;
+ case HCLGE_MBX_UNMAP_RING_TO_VECTOR:
+ ret = hclge_map_unmap_ring_to_vf_vector(vport, false,
+ req);
+ break;
+ case HCLGE_MBX_SET_PROMISC_MODE:
+ ret = hclge_set_vf_promisc_mode(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF promisc mode\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_UNICAST:
+ ret = hclge_set_vf_uc_mac_addr(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF UC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_MULTICAST:
+ ret = hclge_set_vf_mc_mac_addr(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF fail(%d) to set VF MC MAC Addr\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_VLAN:
+ ret = hclge_set_vf_vlan_cfg(vport, req, &resp_msg);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to config VF's VLAN\n",
+ ret);
+ break;
+ case HCLGE_MBX_SET_ALIVE:
+ ret = hclge_set_vf_alive(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "PF failed(%d) to set VF's ALIVE\n",
+ ret);
+ break;
+ case HCLGE_MBX_GET_QINFO:
+ hclge_get_vf_queue_info(vport, &resp_msg);
+ break;
+ case HCLGE_MBX_GET_QDEPTH:
+ hclge_get_vf_queue_depth(vport, &resp_msg);
+ break;
+ case HCLGE_MBX_GET_TCINFO:
+ hclge_get_vf_tcinfo(vport, &resp_msg);
+ break;
+ case HCLGE_MBX_GET_LINK_STATUS:
+ ret = hclge_get_link_info(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "failed to inform link stat to VF, ret = %d\n",
+ ret);
+ break;
+ case HCLGE_MBX_QUEUE_RESET:
+ hclge_mbx_reset_vf_queue(vport, req);
+ break;
+ case HCLGE_MBX_RESET:
+ ret = hclge_reset_vf(vport);
+ break;
+ case HCLGE_MBX_KEEP_ALIVE:
+ hclge_vf_keep_alive(vport);
+ break;
+ case HCLGE_MBX_SET_MTU:
+ ret = hclge_set_vf_mtu(vport, req);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF fail(%d) to set mtu\n", ret);
+ break;
+ case HCLGE_MBX_GET_QID_IN_PF:
+ ret = hclge_get_queue_id_in_pf(vport, req, &resp_msg);
+ break;
+ case HCLGE_MBX_GET_RSS_KEY:
+ ret = hclge_get_rss_key(vport, req, &resp_msg);
+ break;
+ case HCLGE_MBX_GET_LINK_MODE:
+ hclge_get_link_mode(vport, req);
+ break;
+ case HCLGE_MBX_GET_VF_FLR_STATUS:
+ case HCLGE_MBX_VF_UNINIT:
+ is_del = req->msg.code == HCLGE_MBX_VF_UNINIT;
+ hclge_rm_vport_all_mac_table(vport, is_del,
+ HCLGE_MAC_ADDR_UC);
+ hclge_rm_vport_all_mac_table(vport, is_del,
+ HCLGE_MAC_ADDR_MC);
+ hclge_rm_vport_all_vlan_table(vport, is_del);
+ break;
+ case HCLGE_MBX_GET_MEDIA_TYPE:
+ hclge_get_vf_media_type(vport, &resp_msg);
+ break;
+ case HCLGE_MBX_PUSH_LINK_STATUS:
+ hclge_handle_link_change_event(hdev, req);
+ break;
+ case HCLGE_MBX_GET_MAC_ADDR:
+ hclge_get_vf_mac_addr(vport, &resp_msg);
+ break;
+ case HCLGE_MBX_NCSI_ERROR:
+ hclge_handle_ncsi_error(hdev);
+ break;
+ case HCLGE_MBX_HANDLE_VF_TBL:
+ hclge_handle_vf_tbl(vport, req);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "un-supported mailbox message, code = %u\n",
+ req->msg.code);
+ break;
+ }
+
+ /* PF driver should not reply IMP */
+ if (hnae3_get_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B) &&
+ req->msg.code < HCLGE_MBX_GET_VF_FLR_STATUS) {
+ resp_msg.status = ret;
+ hclge_gen_resp_to_vf(vport, req, &resp_msg);
+ }
+
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+
+ /* reinitialize ret after completing the mbx message processing */
+ ret = 0;
+ }
+
+ /* Write back the CMDQ_RQ head pointer; the M7 needs this pointer */
+ hclge_write_dev(&hdev->hw, HCLGE_NIC_CRQ_HEAD_REG, crq->next_to_use);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
new file mode 100644
index 000000000..c194bba18
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c
@@ -0,0 +1,272 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/marvell_phy.h>
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_mdio.h"
+
+enum hclge_mdio_c22_op_seq {
+ HCLGE_MDIO_C22_WRITE = 1,
+ HCLGE_MDIO_C22_READ = 2
+};
+
+#define HCLGE_MDIO_CTRL_START_B 0
+#define HCLGE_MDIO_CTRL_ST_S 1
+#define HCLGE_MDIO_CTRL_ST_M (0x3 << HCLGE_MDIO_CTRL_ST_S)
+#define HCLGE_MDIO_CTRL_OP_S 3
+#define HCLGE_MDIO_CTRL_OP_M (0x3 << HCLGE_MDIO_CTRL_OP_S)
+
+#define HCLGE_MDIO_PHYID_S 0
+#define HCLGE_MDIO_PHYID_M (0x1f << HCLGE_MDIO_PHYID_S)
+
+#define HCLGE_MDIO_PHYREG_S 0
+#define HCLGE_MDIO_PHYREG_M (0x1f << HCLGE_MDIO_PHYREG_S)
+
+#define HCLGE_MDIO_STA_B 0
+
+struct hclge_mdio_cfg_cmd {
+ u8 ctrl_bit;
+ u8 phyid;
+ u8 phyad;
+ u8 rsvd;
+ __le16 reserve;
+ __le16 data_wr;
+ __le16 data_rd;
+ __le16 sta;
+};
+
+static int hclge_mdio_write(struct mii_bus *bus, int phyid, int regnum,
+ u16 data)
+{
+ struct hclge_mdio_cfg_cmd *mdio_cmd;
+ struct hclge_dev *hdev = bus->priv;
+ struct hclge_desc desc;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, false);
+
+ mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
+
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
+
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_WRITE);
+
+ mdio_cmd->data_wr = cpu_to_le16(data);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio write fail when sending cmd, status is %d.\n",
+ ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_mdio_read(struct mii_bus *bus, int phyid, int regnum)
+{
+ struct hclge_mdio_cfg_cmd *mdio_cmd;
+ struct hclge_dev *hdev = bus->priv;
+ struct hclge_desc desc;
+ int ret;
+
+ if (test_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state))
+ return 0;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_MDIO_CONFIG, true);
+
+ mdio_cmd = (struct hclge_mdio_cfg_cmd *)desc.data;
+
+ hnae3_set_field(mdio_cmd->phyid, HCLGE_MDIO_PHYID_M,
+ HCLGE_MDIO_PHYID_S, (u32)phyid);
+ hnae3_set_field(mdio_cmd->phyad, HCLGE_MDIO_PHYREG_M,
+ HCLGE_MDIO_PHYREG_S, (u32)regnum);
+
+ hnae3_set_bit(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_START_B, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_ST_M,
+ HCLGE_MDIO_CTRL_ST_S, 1);
+ hnae3_set_field(mdio_cmd->ctrl_bit, HCLGE_MDIO_CTRL_OP_M,
+ HCLGE_MDIO_CTRL_OP_S, HCLGE_MDIO_C22_READ);
+
+ /* Read out phy data */
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "mdio read fail when get data, status is %d.\n",
+ ret);
+ return ret;
+ }
+
+ if (hnae3_get_bit(le16_to_cpu(mdio_cmd->sta), HCLGE_MDIO_STA_B)) {
+ dev_err(&hdev->pdev->dev, "mdio read data error\n");
+ return -EIO;
+ }
+
+ return le16_to_cpu(mdio_cmd->data_rd);
+}
+
+int hclge_mac_mdio_config(struct hclge_dev *hdev)
+{
+#define PHY_INEXISTENT 255
+
+ struct hclge_mac *mac = &hdev->hw.mac;
+ struct phy_device *phydev;
+ struct mii_bus *mdio_bus;
+ int ret;
+
+ if (hdev->hw.mac.phy_addr == PHY_INEXISTENT) {
+ dev_info(&hdev->pdev->dev,
+ "no phy device is connected to mdio bus\n");
+ return 0;
+ } else if (hdev->hw.mac.phy_addr >= PHY_MAX_ADDR) {
+ dev_err(&hdev->pdev->dev, "phy_addr(%u) is too large.\n",
+ hdev->hw.mac.phy_addr);
+ return -EINVAL;
+ }
+
+ mdio_bus = devm_mdiobus_alloc(&hdev->pdev->dev);
+ if (!mdio_bus)
+ return -ENOMEM;
+
+ mdio_bus->name = "hisilicon MII bus";
+ mdio_bus->read = hclge_mdio_read;
+ mdio_bus->write = hclge_mdio_write;
+ snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "mii",
+ dev_name(&hdev->pdev->dev));
+
+ mdio_bus->parent = &hdev->pdev->dev;
+ mdio_bus->priv = hdev;
+ mdio_bus->phy_mask = ~(1 << mac->phy_addr);
+ ret = mdiobus_register(mdio_bus);
+ if (ret) {
+ dev_err(mdio_bus->parent,
+ "failed to register MDIO bus, ret = %d\n", ret);
+ return ret;
+ }
+
+ phydev = mdiobus_get_phy(mdio_bus, mac->phy_addr);
+ if (!phydev) {
+ dev_err(mdio_bus->parent, "Failed to get phy device\n");
+ mdiobus_unregister(mdio_bus);
+ return -EIO;
+ }
+
+ mac->phydev = phydev;
+ mac->mdio_bus = mdio_bus;
+
+ return 0;
+}
+
+static void hclge_mac_adjust_link(struct net_device *netdev)
+{
+ struct hnae3_handle *h = *((void **)netdev_priv(netdev));
+ struct hclge_vport *vport = hclge_get_vport(h);
+ struct hclge_dev *hdev = vport->back;
+ int duplex, speed;
+ int ret;
+
+ /* When the phy link is down, do nothing */
+ if (netdev->phydev->link == 0)
+ return;
+
+ speed = netdev->phydev->speed;
+ duplex = netdev->phydev->duplex;
+
+ ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex);
+ if (ret)
+ netdev_err(netdev, "failed to adjust link.\n");
+
+ ret = hclge_cfg_flowctrl(hdev);
+ if (ret)
+ netdev_err(netdev, "failed to configure flow control.\n");
+}
+
+int hclge_mac_connect_phy(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct net_device *netdev = hdev->vport[0].nic.netdev;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
+ int ret;
+
+ if (!phydev)
+ return 0;
+
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported);
+
+ phydev->dev_flags |= MARVELL_PHY_LED0_LINK_LED1_ACTIVE;
+
+ ret = phy_connect_direct(netdev, phydev,
+ hclge_mac_adjust_link,
+ PHY_INTERFACE_MODE_SGMII);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct err.\n");
+ return ret;
+ }
+
+ linkmode_copy(mask, hdev->hw.mac.supported);
+ linkmode_and(phydev->supported, phydev->supported, mask);
+ linkmode_copy(phydev->advertising, phydev->supported);
+
+ /* The supported flags include Pause and Asym Pause, but the default
+ * advertising should be rx on, tx on, so clear Asym Pause from the
+ * advertising flags
+ */
+ linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
+ phydev->advertising);
+
+ phy_attached_info(phydev);
+
+ return 0;
+}
+
+void hclge_mac_disconnect_phy(struct hnae3_handle *handle)
+{
+ struct hclge_vport *vport = hclge_get_vport(handle);
+ struct hclge_dev *hdev = vport->back;
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_disconnect(phydev);
+}
+
+void hclge_mac_start_phy(struct hclge_dev *hdev)
+{
+ struct phy_device *phydev = hdev->hw.mac.phydev;
+
+ if (!phydev)
+ return;
+
+ phy_loopback(phydev, false);
+
+ phy_start(phydev);
+}
+
+void hclge_mac_stop_phy(struct hclge_dev *hdev)
+{
+ struct net_device *netdev = hdev->vport[0].nic.netdev;
+ struct phy_device *phydev = netdev->phydev;
+
+ if (!phydev)
+ return;
+
+ phy_stop(phydev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
new file mode 100644
index 000000000..dd9a1218a
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_MDIO_H
+#define __HCLGE_MDIO_H
+
+int hclge_mac_mdio_config(struct hclge_dev *hdev);
+int hclge_mac_connect_phy(struct hnae3_handle *handle);
+void hclge_mac_disconnect_phy(struct hnae3_handle *handle);
+void hclge_mac_start_phy(struct hclge_dev *hdev);
+void hclge_mac_stop_phy(struct hclge_dev *hdev);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
new file mode 100644
index 000000000..8c5c5562c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -0,0 +1,1505 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+
+#include "hclge_cmd.h"
+#include "hclge_main.h"
+#include "hclge_tm.h"
+
+enum hclge_shaper_level {
+ HCLGE_SHAPER_LVL_PRI = 0,
+ HCLGE_SHAPER_LVL_PG = 1,
+ HCLGE_SHAPER_LVL_PORT = 2,
+ HCLGE_SHAPER_LVL_QSET = 3,
+ HCLGE_SHAPER_LVL_CNT = 4,
+ HCLGE_SHAPER_LVL_VF = 0,
+ HCLGE_SHAPER_LVL_PF = 1,
+};
+
+#define HCLGE_TM_PFC_PKT_GET_CMD_NUM 3
+#define HCLGE_TM_PFC_NUM_GET_PER_CMD 3
+
+#define HCLGE_SHAPER_BS_U_DEF 5
+#define HCLGE_SHAPER_BS_S_DEF 20
+
+/* hclge_shaper_para_calc: calculate the IR parameters for the shaper
+ * @ir: rate to be configured, in Mbps
+ * @shaper_level: the shaper level, e.g. port, pg, priority, qset
+ * @ir_para: parameters of the IR shaper
+ * @max_tm_rate: the maximum tm rate available for configuration
+ *
+ * the formula:
+ *
+ * IR_b * (2 ^ IR_u) * 8
+ * IR(Mbps) = ------------------------- * CLOCK(1000Mbps)
+ * Tick * (2 ^ IR_s)
+ *
+ * @return: 0: calculation successful, negative: failure
+ */
+static int hclge_shaper_para_calc(u32 ir, u8 shaper_level,
+ struct hclge_shaper_ir_para *ir_para,
+ u32 max_tm_rate)
+{
+#define DIVISOR_CLK (1000 * 8)
+#define DIVISOR_IR_B_126 (126 * DIVISOR_CLK)
+
+ static const u16 tick_array[HCLGE_SHAPER_LVL_CNT] = {
+ 6 * 256, /* Priority level */
+ 6 * 32, /* Priority group level */
+ 6 * 8, /* Port level */
+ 6 * 256 /* Qset level */
+ };
+ u8 ir_u_calc = 0;
+ u8 ir_s_calc = 0;
+ u32 ir_calc;
+ u32 tick;
+
+ /* Calc tick */
+ if (shaper_level >= HCLGE_SHAPER_LVL_CNT ||
+ ir > max_tm_rate)
+ return -EINVAL;
+
+ tick = tick_array[shaper_level];
+
+ /* Calculate the rate when ir_b = 126, ir_u = 0 and ir_s = 0;
+ * the formula then reduces to:
+ * 126 * 1 * 8
+ * ir_calc = ---------------- * 1000
+ * tick * 1
+ */
+ ir_calc = (DIVISOR_IR_B_126 + (tick >> 1) - 1) / tick;
+
+ if (ir_calc == ir) {
+ ir_para->ir_b = 126;
+ ir_para->ir_u = 0;
+ ir_para->ir_s = 0;
+
+ return 0;
+ } else if (ir_calc > ir) {
+ /* Increasing the denominator to select ir_s value */
+ while (ir_calc >= ir && ir) {
+ ir_s_calc++;
+ ir_calc = DIVISOR_IR_B_126 / (tick * (1 << ir_s_calc));
+ }
+
+ ir_para->ir_b = (ir * tick * (1 << ir_s_calc) +
+ (DIVISOR_CLK >> 1)) / DIVISOR_CLK;
+ } else {
+ /* Increasing the numerator to select ir_u value */
+ u32 numerator;
+
+ while (ir_calc < ir) {
+ ir_u_calc++;
+ numerator = DIVISOR_IR_B_126 * (1 << ir_u_calc);
+ ir_calc = (numerator + (tick >> 1)) / tick;
+ }
+
+ if (ir_calc == ir) {
+ ir_para->ir_b = 126;
+ } else {
+ u32 denominator = DIVISOR_CLK * (1 << --ir_u_calc);
+
+ ir_para->ir_b = (ir * tick + (denominator >> 1)) /
+ denominator;
+ }
+ }
+
+ ir_para->ir_u = ir_u_calc;
+ ir_para->ir_s = ir_s_calc;
+
+ return 0;
+}
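+
+/* A worked example with illustrative numbers: requesting ir = 200 Mbps at
+ * priority level uses tick = 6 * 256 = 1536. The base rate
+ * 1008000 / 1536 ~= 656 Mbps exceeds ir, so the ir_s branch runs:
+ * ir_s = 2 gives 1008000 / (1536 * 4) ~= 164 < 200, and then
+ * ir_b = (200 * 1536 * 4 + 4000) / 8000 = 154. The programmed rate is
+ * 154 * 8 * 1000 / (1536 * 4) ~= 200.5 Mbps, i.e. ir_b = 154, ir_u = 0,
+ * ir_s = 2.
+ */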
+
+static int hclge_pfc_stats_get(struct hclge_dev *hdev,
+ enum hclge_opcode_type opcode, u64 *stats)
+{
+ struct hclge_desc desc[HCLGE_TM_PFC_PKT_GET_CMD_NUM];
+ int ret, i, j;
+
+ if (!(opcode == HCLGE_OPC_QUERY_PFC_RX_PKT_CNT ||
+ opcode == HCLGE_OPC_QUERY_PFC_TX_PKT_CNT))
+ return -EINVAL;
+
+ for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM - 1; i++) {
+ hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
+ desc[i].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
+ }
+
+ hclge_cmd_setup_basic_desc(&desc[i], opcode, true);
+
+ ret = hclge_cmd_send(&hdev->hw, desc, HCLGE_TM_PFC_PKT_GET_CMD_NUM);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < HCLGE_TM_PFC_PKT_GET_CMD_NUM; i++) {
+ struct hclge_pfc_stats_cmd *pfc_stats =
+ (struct hclge_pfc_stats_cmd *)desc[i].data;
+
+ for (j = 0; j < HCLGE_TM_PFC_NUM_GET_PER_CMD; j++) {
+ u32 index = i * HCLGE_TM_PFC_PKT_GET_CMD_NUM + j;
+
+ if (index < HCLGE_MAX_TC_NUM)
+ stats[index] =
+ le64_to_cpu(pfc_stats->pkt_num[j]);
+ }
+ }
+ return 0;
+}
+
+int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats)
+{
+ return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_RX_PKT_CNT, stats);
+}
+
+int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats)
+{
+ return hclge_pfc_stats_get(hdev, HCLGE_OPC_QUERY_PFC_TX_PKT_CNT, stats);
+}
+
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PAUSE_EN, false);
+
+ desc.data[0] = cpu_to_le32((tx ? HCLGE_TX_MAC_PAUSE_EN_MSK : 0) |
+ (rx ? HCLGE_RX_MAC_PAUSE_EN_MSK : 0));
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap)
+{
+ struct hclge_desc desc;
+ struct hclge_pfc_en_cmd *pfc = (struct hclge_pfc_en_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_PFC_PAUSE_EN, false);
+
+ pfc->tx_rx_en_bitmap = tx_rx_bitmap;
+ pfc->pri_en_bitmap = pfc_bitmap;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_pause_param_cfg(struct hclge_dev *hdev, const u8 *addr,
+ u8 pause_trans_gap, u16 pause_trans_time)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, false);
+
+ ether_addr_copy(pause_param->mac_addr, addr);
+ ether_addr_copy(pause_param->mac_addr_extra, addr);
+ pause_param->pause_trans_gap = pause_trans_gap;
+ pause_param->pause_trans_time = cpu_to_le16(pause_trans_time);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr)
+{
+ struct hclge_cfg_pause_param_cmd *pause_param;
+ struct hclge_desc desc;
+ u16 trans_time;
+ u8 trans_gap;
+ int ret;
+
+ pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ return ret;
+
+ trans_gap = pause_param->pause_trans_gap;
+ trans_time = le16_to_cpu(pause_param->pause_trans_time);
+
+ return hclge_pause_param_cfg(hdev, mac_addr, trans_gap, trans_time);
+}
+
+static int hclge_fill_pri_array(struct hclge_dev *hdev, u8 *pri, u8 pri_id)
+{
+ u8 tc;
+
+ tc = hdev->tm_info.prio_tc[pri_id];
+
+ if (tc >= hdev->tm_info.num_tc)
+ return -EINVAL;
+
+ /* The register for priority mapping has four bytes; the first byte
+ * holds priority0 and priority1: the higher 4 bits stand for
+ * priority1 while the lower 4 bits stand for priority0, as below:
+ * first byte: | pri_1 | pri_0 |
+ * second byte: | pri_3 | pri_2 |
+ * third byte: | pri_5 | pri_4 |
+ * fourth byte: | pri_7 | pri_6 |
+ */
+ pri[pri_id >> 1] |= tc << ((pri_id & 1) * 4);
+
+ return 0;
+}
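+
+/* For instance, pri_id = 5 with tc = 3 sets pri[5 >> 1] = pri[2] to
+ * 3 << ((5 & 1) * 4), i.e. TC 3 lands in the high nibble of the third
+ * byte, matching the "| pri_5 | pri_4 |" layout above.
+ */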
+
+static int hclge_up_to_tc_map(struct hclge_dev *hdev)
+{
+ struct hclge_desc desc;
+ u8 *pri = (u8 *)desc.data;
+ u8 pri_id;
+ int ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, false);
+
+ for (pri_id = 0; pri_id < HNAE3_MAX_USER_PRIO; pri_id++) {
+ ret = hclge_fill_pri_array(hdev, pri, pri_id);
+ if (ret)
+ return ret;
+ }
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev,
+ u8 pg_id, u8 pri_bit_map)
+{
+ struct hclge_pg_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_TO_PRI_LINK, false);
+
+ map = (struct hclge_pg_to_pri_link_cmd *)desc.data;
+
+ map->pg_id = pg_id;
+ map->pri_bit_map = pri_bit_map;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev,
+ u16 qs_id, u8 pri)
+{
+ struct hclge_qs_to_pri_link_cmd *map;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_TO_PRI_LINK, false);
+
+ map = (struct hclge_qs_to_pri_link_cmd *)desc.data;
+
+ map->qs_id = cpu_to_le16(qs_id);
+ map->priority = pri;
+ map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_q_to_qs_map_cfg(struct hclge_dev *hdev,
+ u16 q_id, u16 qs_id)
+{
+ struct hclge_nq_to_qs_link_cmd *map;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_NQ_TO_QS_LINK, false);
+
+ map = (struct hclge_nq_to_qs_link_cmd *)desc.data;
+
+ map->nq_id = cpu_to_le16(q_id);
+ map->qset_id = cpu_to_le16(qs_id | HCLGE_TM_Q_QS_LINK_VLD_MSK);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pg_weight_cfg(struct hclge_dev *hdev, u8 pg_id,
+ u8 dwrr)
+{
+ struct hclge_pg_weight_cmd *weight;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_WEIGHT, false);
+
+ weight = (struct hclge_pg_weight_cmd *)desc.data;
+
+ weight->pg_id = pg_id;
+ weight->dwrr = dwrr;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_weight_cfg(struct hclge_dev *hdev, u8 pri_id,
+ u8 dwrr)
+{
+ struct hclge_priority_weight_cmd *weight;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_WEIGHT, false);
+
+ weight = (struct hclge_priority_weight_cmd *)desc.data;
+
+ weight->pri_id = pri_id;
+ weight->dwrr = dwrr;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_weight_cfg(struct hclge_dev *hdev, u16 qs_id,
+ u8 dwrr)
+{
+ struct hclge_qs_weight_cmd *weight;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_WEIGHT, false);
+
+ weight = (struct hclge_qs_weight_cmd *)desc.data;
+
+ weight->qs_id = cpu_to_le16(qs_id);
+ weight->dwrr = dwrr;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static u32 hclge_tm_get_shapping_para(u8 ir_b, u8 ir_u, u8 ir_s,
+ u8 bs_b, u8 bs_s)
+{
+ u32 shapping_para = 0;
+
+ hclge_tm_set_field(shapping_para, IR_B, ir_b);
+ hclge_tm_set_field(shapping_para, IR_U, ir_u);
+ hclge_tm_set_field(shapping_para, IR_S, ir_s);
+ hclge_tm_set_field(shapping_para, BS_B, bs_b);
+ hclge_tm_set_field(shapping_para, BS_S, bs_s);
+
+ return shapping_para;
+}
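+
+/* For example, packing ir_b = 154, ir_u = 0, ir_s = 2 with the default
+ * bucket sizes bs_b = 5 and bs_s = 20 yields
+ * 154 | (0 << 8) | (2 << 12) | (5 << 16) | (20 << 21) = 0x285209a,
+ * which is the 32-bit value written to the shaping register.
+ */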
+
+static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev,
+ enum hclge_shap_bucket bucket, u8 pg_id,
+ u32 shapping_para)
+{
+ struct hclge_pg_shapping_cmd *shap_cfg_cmd;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc;
+
+ opcode = bucket ? HCLGE_OPC_TM_PG_P_SHAPPING :
+ HCLGE_OPC_TM_PG_C_SHAPPING;
+ hclge_cmd_setup_basic_desc(&desc, opcode, false);
+
+ shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data;
+
+ shap_cfg_cmd->pg_id = pg_id;
+
+ shap_cfg_cmd->pg_shapping_para = cpu_to_le32(shapping_para);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_port_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
+ struct hclge_desc desc;
+ u32 shapping_para;
+ int ret;
+
+ ret = hclge_shaper_para_calc(hdev->hw.mac.speed, HCLGE_SHAPER_LVL_PORT,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
+ if (ret)
+ return ret;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PORT_SHAPPING, false);
+ shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data;
+
+ shapping_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ shap_cfg_cmd->port_shapping_para = cpu_to_le32(shapping_para);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_shapping_cfg(struct hclge_dev *hdev,
+ enum hclge_shap_bucket bucket, u8 pri_id,
+ u32 shapping_para)
+{
+ struct hclge_pri_shapping_cmd *shap_cfg_cmd;
+ enum hclge_opcode_type opcode;
+ struct hclge_desc desc;
+
+ opcode = bucket ? HCLGE_OPC_TM_PRI_P_SHAPPING :
+ HCLGE_OPC_TM_PRI_C_SHAPPING;
+
+ hclge_cmd_setup_basic_desc(&desc, opcode, false);
+
+ shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data;
+
+ shap_cfg_cmd->pri_id = pri_id;
+
+ shap_cfg_cmd->pri_shapping_para = cpu_to_le32(shapping_para);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pg_schd_mode_cfg(struct hclge_dev *hdev, u8 pg_id)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PG_SCH_MODE_CFG, false);
+
+ if (hdev->tm_info.pg_info[pg_id].pg_sch_mode == HCLGE_SCH_MODE_DWRR)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
+ else
+ desc.data[1] = 0;
+
+ desc.data[0] = cpu_to_le32(pg_id);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_schd_mode_cfg(struct hclge_dev *hdev, u8 pri_id)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_PRI_SCH_MODE_CFG, false);
+
+ if (hdev->tm_info.tc_info[pri_id].tc_sch_mode == HCLGE_SCH_MODE_DWRR)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
+ else
+ desc.data[1] = 0;
+
+ desc.data[0] = cpu_to_le32(pri_id);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_schd_mode_cfg(struct hclge_dev *hdev, u16 qs_id, u8 mode)
+{
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_QS_SCH_MODE_CFG, false);
+
+ if (mode == HCLGE_SCH_MODE_DWRR)
+ desc.data[1] = cpu_to_le32(HCLGE_TM_TX_SCHD_DWRR_MSK);
+ else
+ desc.data[1] = 0;
+
+ desc.data[0] = cpu_to_le32(qs_id);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_qs_bp_cfg(struct hclge_dev *hdev, u8 tc, u8 grp_id,
+ u32 bit_map)
+{
+ struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd;
+ struct hclge_desc desc;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TM_BP_TO_QSET_MAPPING,
+ false);
+
+ bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data;
+
+ bp_to_qs_map_cmd->tc_id = tc;
+ bp_to_qs_map_cmd->qs_group_id = grp_id;
+ bp_to_qs_map_cmd->qs_bit_map = cpu_to_le32(bit_map);
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_qs_shapping_cmd *shap_cfg_cmd;
+ struct hclge_shaper_ir_para ir_para;
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_desc desc;
+ u32 shaper_para;
+ int ret, i;
+
+ if (!max_tx_rate)
+ max_tx_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+
+ ret = hclge_shaper_para_calc(max_tx_rate, HCLGE_SHAPER_LVL_QSET,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QCN_SHAPPING_CFG,
+ false);
+
+ shap_cfg_cmd = (struct hclge_qs_shapping_cmd *)desc.data;
+ shap_cfg_cmd->qs_id = cpu_to_le16(vport->qs_offset + i);
+ shap_cfg_cmd->qs_shapping_para = cpu_to_le32(shaper_para);
+
+ ret = hclge_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "vf%u, qs%u failed to set tx_rate:%d, ret=%d\n",
+ vport->vport_id, shap_cfg_cmd->qs_id,
+ max_tx_rate, ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u16 max_rss_size;
+ u8 i;
+
+ /* TC configuration is shared by PF/VF in one port; only one TC is
+ * allowed for a VF for simplicity. A VF's vport_id is non-zero.
+ */
+ kinfo->num_tc = vport->vport_id ? 1 :
+ min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
+ vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
+ (vport->vport_id ? (vport->vport_id - 1) : 0);
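+
+ /* For example, with HNAE3_MAX_TC equal to 8, the PF (vport_id 0)
+ * owns qset offsets 0..7 (one per TC), while VF1 starts at qset 8,
+ * VF2 at qset 9, and so on, since each VF uses a single TC.
+ */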
+
+ max_rss_size = min_t(u16, hdev->rss_size_max,
+ vport->alloc_tqps / kinfo->num_tc);
+
+ /* Set to user value, no larger than max_rss_size. */
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+ kinfo->req_rss_size <= max_rss_size) {
+ dev_info(&hdev->pdev->dev, "rss changes from %u to %u\n",
+ kinfo->rss_size, kinfo->req_rss_size);
+ kinfo->rss_size = kinfo->req_rss_size;
+ } else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size)) {
+ /* if the user has not set rss, rss_size should be compared with
+ * the number of valid msi vectors to ensure a one-to-one mapping
+ * between tqp and irq by default.
+ */
+ if (!kinfo->req_rss_size)
+ max_rss_size = min_t(u16, max_rss_size,
+ (hdev->num_nic_msi - 1) /
+ kinfo->num_tc);
+
+ /* Set to the maximum specification value (max_rss_size). */
+ kinfo->rss_size = max_rss_size;
+ }
+
+ kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+ vport->dwrr = 100; /* 100 percent as init */
+ vport->alloc_rss_size = kinfo->rss_size;
+ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ if (hdev->hw_tc_map & BIT(i) && i < kinfo->num_tc) {
+ kinfo->tc_info[i].enable = true;
+ kinfo->tc_info[i].tqp_offset = i * kinfo->rss_size;
+ kinfo->tc_info[i].tqp_count = kinfo->rss_size;
+ kinfo->tc_info[i].tc = i;
+ } else {
+ /* Set to default queue if TC is disabled */
+ kinfo->tc_info[i].enable = false;
+ kinfo->tc_info[i].tqp_offset = 0;
+ kinfo->tc_info[i].tqp_count = 1;
+ kinfo->tc_info[i].tc = 0;
+ }
+ }
+
+ memcpy(kinfo->prio_tc, hdev->tm_info.prio_tc,
+ sizeof_field(struct hnae3_knic_private_info, prio_tc));
+}
+
+static void hclge_tm_vport_info_update(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ u32 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ hclge_tm_vport_tc_info_update(vport);
+
+ vport++;
+ }
+}
+
+static void hclge_tm_tc_info_init(struct hclge_dev *hdev)
+{
+ u8 i;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ hdev->tm_info.tc_info[i].tc_id = i;
+ hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR;
+ hdev->tm_info.tc_info[i].pgid = 0;
+ hdev->tm_info.tc_info[i].bw_limit =
+ hdev->tm_info.pg_info[0].bw_limit;
+ }
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++)
+ hdev->tm_info.prio_tc[i] =
+ (i >= hdev->tm_info.num_tc) ? 0 : i;
+}
+
+static void hclge_tm_pg_info_init(struct hclge_dev *hdev)
+{
+#define BW_PERCENT 100
+#define DEFAULT_BW_WEIGHT 1
+
+ u8 i;
+
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ int k;
+
+ hdev->tm_info.pg_dwrr[i] = i ? 0 : BW_PERCENT;
+
+ hdev->tm_info.pg_info[i].pg_id = i;
+ hdev->tm_info.pg_info[i].pg_sch_mode = HCLGE_SCH_MODE_DWRR;
+
+ hdev->tm_info.pg_info[i].bw_limit =
+ hdev->ae_dev->dev_specs.max_tm_rate;
+
+ if (i != 0)
+ continue;
+
+ hdev->tm_info.pg_info[i].tc_bit_map = hdev->hw_tc_map;
+ for (k = 0; k < hdev->tm_info.num_tc; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT;
+ for (; k < HNAE3_MAX_TC; k++)
+ hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT;
+ }
+}
+
+static void hclge_update_fc_mode_by_dcb_flag(struct hclge_dev *hdev)
+{
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en) {
+ if (hdev->fc_mode_last_time == HCLGE_FC_PFC)
+ dev_warn(&hdev->pdev->dev,
+ "Only 1 tc used, but last mode is FC_PFC\n");
+
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ } else if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ /* fc_mode_last_time records the last fc_mode when
+ * DCB is enabled, so that fc_mode can be set to
+ * the correct value when DCB is disabled.
+ */
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
+static void hclge_update_fc_mode(struct hclge_dev *hdev)
+{
+ if (!hdev->tm_info.pfc_en) {
+ hdev->tm_info.fc_mode = hdev->fc_mode_last_time;
+ return;
+ }
+
+ if (hdev->tm_info.fc_mode != HCLGE_FC_PFC) {
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+ hdev->tm_info.fc_mode = HCLGE_FC_PFC;
+ }
+}
+
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev)
+{
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3)
+ hclge_update_fc_mode(hdev);
+ else
+ hclge_update_fc_mode_by_dcb_flag(hdev);
+}
+
+static void hclge_tm_schd_info_init(struct hclge_dev *hdev)
+{
+ hclge_tm_pg_info_init(hdev);
+
+ hclge_tm_tc_info_init(hdev);
+
+ hclge_tm_vport_info_update(hdev);
+
+ hclge_tm_pfc_info_update(hdev);
+}
+
+static int hclge_tm_pg_to_pri_map(struct hclge_dev *hdev)
+{
+ int ret;
+ u32 i;
+
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
+ return 0;
+
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ /* Cfg mapping */
+ ret = hclge_tm_pg_to_pri_map_cfg(
+ hdev, i, hdev->tm_info.pg_info[i].tc_bit_map);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pg_shaper_cfg(struct hclge_dev *hdev)
+{
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para;
+ int ret;
+ u32 i;
+
+ /* Cfg pg schd */
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
+ return 0;
+
+ /* Pg to pri */
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ /* Calc shaper para */
+ ret = hclge_shaper_para_calc(hdev->tm_info.pg_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_PG,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pg_shapping_cfg(hdev,
+ HCLGE_TM_SHAP_C_BUCKET, i,
+ shaper_para);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pg_shapping_cfg(hdev,
+ HCLGE_TM_SHAP_P_BUCKET, i,
+ shaper_para);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pg_dwrr_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+ u32 i;
+
+ /* cfg pg schd */
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE)
+ return 0;
+
+ /* pg to prio */
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ /* Cfg dwrr */
+ ret = hclge_tm_pg_weight_cfg(hdev, i, hdev->tm_info.pg_dwrr[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_vport_q_to_qs_map(struct hclge_dev *hdev,
+ struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hnae3_queue **tqp = kinfo->tqp;
+ struct hnae3_tc_info *v_tc_info;
+ u32 i, j;
+ int ret;
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ v_tc_info = &kinfo->tc_info[i];
+ for (j = 0; j < v_tc_info->tqp_count; j++) {
+ struct hnae3_queue *q = tqp[v_tc_info->tqp_offset + j];
+
+ ret = hclge_tm_q_to_qs_map_cfg(hdev,
+ hclge_get_queue_id(q),
+ vport->qs_offset + i);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_q_qs_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i, k;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ /* Cfg qs -> pri mapping, one by one mapping */
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hnae3_knic_private_info *kinfo =
+ &vport[k].nic.kinfo;
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(
+ hdev, vport[k].qs_offset + i, i);
+ if (ret)
+ return ret;
+ }
+ }
+ } else if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE) {
+ /* Cfg qs -> pri mapping, qs = tc, pri = vf, 8 qs -> 1 pri */
+ for (k = 0; k < hdev->num_alloc_vport; k++)
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ ret = hclge_tm_qs_to_pri_map_cfg(
+ hdev, vport[k].qs_offset + i, k);
+ if (ret)
+ return ret;
+ }
+ } else {
+ return -EINVAL;
+ }
+
+ /* Cfg q -> qs mapping */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_vport_q_to_qs_map(hdev, vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev)
+{
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_PRI,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i,
+ shaper_para);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b,
+ ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i,
+ shaper_para);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_shaper_pri_cfg(struct hclge_vport *vport)
+{
+ struct hclge_dev *hdev = vport->back;
+ struct hclge_shaper_ir_para ir_para;
+ u32 shaper_para;
+ int ret;
+
+ ret = hclge_shaper_para_calc(vport->bw_limit, HCLGE_SHAPER_LVL_VF,
+ &ir_para,
+ hdev->ae_dev->dev_specs.max_tm_rate);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(0, 0, 0,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET,
+ vport->vport_id, shaper_para);
+ if (ret)
+ return ret;
+
+ shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, ir_para.ir_u,
+ ir_para.ir_s,
+ HCLGE_SHAPER_BS_U_DEF,
+ HCLGE_SHAPER_BS_S_DEF);
+ ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET,
+ vport->vport_id, shaper_para);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_shaper_qs_cfg(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate;
+ struct hclge_shaper_ir_para ir_para;
+ u32 i;
+ int ret;
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ ret = hclge_shaper_para_calc(hdev->tm_info.tc_info[i].bw_limit,
+ HCLGE_SHAPER_LVL_QSET,
+ &ir_para, max_tm_rate);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_shaper_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ /* Need config vport shaper */
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_pri_vnet_base_shaper_pri_cfg(vport);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_pri_vnet_base_shaper_qs_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_shaper_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ ret = hclge_tm_pri_tc_base_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+ } else {
+ ret = hclge_tm_pri_vnet_base_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hclge_pg_info *pg_info;
+ u8 dwrr;
+ int ret;
+ u32 i, k;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ pg_info =
+ &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ dwrr = pg_info->tc_dwrr[i];
+
+ ret = hclge_tm_pri_weight_cfg(hdev, i, dwrr);
+ if (ret)
+ return ret;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ ret = hclge_tm_qs_weight_cfg(
+ hdev, vport[k].qs_offset + i,
+ vport[k].dwrr);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_ets_tc_dwrr_cfg(struct hclge_dev *hdev)
+{
+#define DEFAULT_TC_OFFSET 14
+
+ struct hclge_ets_tc_weight_cmd *ets_weight;
+ struct hclge_desc desc;
+ unsigned int i;
+
+ hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, false);
+ ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data;
+
+ for (i = 0; i < HNAE3_MAX_TC; i++) {
+ struct hclge_pg_info *pg_info;
+
+ pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid];
+ ets_weight->tc_weight[i] = pg_info->tc_dwrr[i];
+ }
+
+ ets_weight->weight_offset = DEFAULT_TC_OFFSET;
+
+ return hclge_cmd_send(&hdev->hw, &desc, 1);
+}
+
+static int hclge_tm_pri_vnet_base_dwrr_pri_cfg(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u8 i;
+
+ /* Vf dwrr */
+ ret = hclge_tm_pri_weight_cfg(hdev, vport->vport_id, vport->dwrr);
+ if (ret)
+ return ret;
+
+ /* Qset dwrr */
+ for (i = 0; i < kinfo->num_tc; i++) {
+ ret = hclge_tm_qs_weight_cfg(
+ hdev, vport->qs_offset + i,
+ hdev->tm_info.pg_info[0].tc_dwrr[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_vnet_base_dwrr_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u32 i;
+
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_pri_vnet_base_dwrr_pri_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_pri_dwrr_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ ret = hclge_tm_pri_tc_base_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ ret = hclge_tm_ets_tc_dwrr_cfg(hdev);
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&hdev->pdev->dev,
+ "fw %08x does't support ets tc weight cmd\n",
+ hdev->fw_version);
+ ret = 0;
+ }
+
+ return ret;
+ } else {
+ ret = hclge_tm_pri_vnet_base_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_map_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_up_to_tc_map(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_pg_to_pri_map(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_pri_q_qs_cfg(hdev);
+}
+
+static int hclge_tm_shaper_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_port_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_tm_pg_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_pri_shaper_cfg(hdev);
+}
+
+int hclge_tm_dwrr_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_pg_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_pri_dwrr_cfg(hdev);
+}
+
+static int hclge_tm_lvl2_schd_mode_cfg(struct hclge_dev *hdev)
+{
+ int ret;
+ u8 i;
+
+ /* Only configured in TC-based scheduler mode */
+ if (hdev->tx_sch_mode == HCLGE_FLAG_VNET_BASE_SCH_MODE)
+ return 0;
+
+ for (i = 0; i < hdev->tm_info.num_pg; i++) {
+ ret = hclge_tm_pg_schd_mode_cfg(hdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_schd_mode_vnet_base_cfg(struct hclge_vport *vport)
+{
+ struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
+ struct hclge_dev *hdev = vport->back;
+ int ret;
+ u8 i;
+
+ if (vport->vport_id >= HNAE3_MAX_TC)
+ return -EINVAL;
+
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, vport->vport_id);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < kinfo->num_tc; i++) {
+ u8 sch_mode = hdev->tm_info.tc_info[i].tc_sch_mode;
+
+ ret = hclge_tm_qs_schd_mode_cfg(hdev, vport->qs_offset + i,
+ sch_mode);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+ u8 i, k;
+
+ if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) {
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_tm_pri_schd_mode_cfg(hdev, i);
+ if (ret)
+ return ret;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ ret = hclge_tm_qs_schd_mode_cfg(
+ hdev, vport[k].qs_offset + i,
+ HCLGE_SCH_MODE_DWRR);
+ if (ret)
+ return ret;
+ }
+ }
+ } else {
+ for (i = 0; i < hdev->num_alloc_vport; i++) {
+ ret = hclge_tm_schd_mode_vnet_base_cfg(vport);
+ if (ret)
+ return ret;
+
+ vport++;
+ }
+ }
+
+ return 0;
+}
+
+static int hclge_tm_schd_mode_hw(struct hclge_dev *hdev)
+{
+ int ret;
+
+ ret = hclge_tm_lvl2_schd_mode_cfg(hdev);
+ if (ret)
+ return ret;
+
+ return hclge_tm_lvl34_schd_mode_cfg(hdev);
+}
+
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev)
+{
+ int ret;
+
+ /* Cfg tm mapping */
+ ret = hclge_tm_map_cfg(hdev);
+ if (ret)
+ return ret;
+
+ /* Cfg tm shaper */
+ ret = hclge_tm_shaper_cfg(hdev);
+ if (ret)
+ return ret;
+
+ /* Cfg dwrr */
+ ret = hclge_tm_dwrr_cfg(hdev);
+ if (ret)
+ return ret;
+
+ /* Cfg schd mode for each level schd */
+ return hclge_tm_schd_mode_hw(hdev);
+}
+
+static int hclge_pause_param_setup_hw(struct hclge_dev *hdev)
+{
+ struct hclge_mac *mac = &hdev->hw.mac;
+
+ return hclge_pause_param_cfg(hdev, mac->mac_addr,
+ HCLGE_DEFAULT_PAUSE_TRANS_GAP,
+ HCLGE_DEFAULT_PAUSE_TRANS_TIME);
+}
+
+static int hclge_pfc_setup_hw(struct hclge_dev *hdev)
+{
+ u8 enable_bitmap = 0;
+
+ if (hdev->tm_info.fc_mode == HCLGE_FC_PFC)
+ enable_bitmap = HCLGE_TX_MAC_PAUSE_EN_MSK |
+ HCLGE_RX_MAC_PAUSE_EN_MSK;
+
+ return hclge_pfc_pause_en_cfg(hdev, enable_bitmap,
+ hdev->tm_info.pfc_en);
+}
+
+/* Each TC has 1024 queue sets for backpressure; they are divided into
+ * 32 groups, each containing 32 queue sets, which can be represented
+ * by a u32 bitmap.
+ */
+static int hclge_bp_setup_hw(struct hclge_dev *hdev, u8 tc)
+{
+ int i;
+
+ for (i = 0; i < HCLGE_BP_GRP_NUM; i++) {
+ u32 qs_bitmap = 0;
+ int k, ret;
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ struct hclge_vport *vport = &hdev->vport[k];
+ u16 qs_id = vport->qs_offset + tc;
+ u8 grp, sub_grp;
+
+ grp = hnae3_get_field(qs_id, HCLGE_BP_GRP_ID_M,
+ HCLGE_BP_GRP_ID_S);
+ sub_grp = hnae3_get_field(qs_id, HCLGE_BP_SUB_GRP_ID_M,
+ HCLGE_BP_SUB_GRP_ID_S);
+ if (i == grp)
+ qs_bitmap |= (1 << sub_grp);
+ }
+
+ ret = hclge_tm_qs_bp_cfg(hdev, tc, i, qs_bitmap);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
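+
+/* For example, a vport whose qset for this TC is qs_id = 37 maps to
+ * grp = 37 >> 5 = 1 and sub_grp = 37 & 0x1f = 5, so bit 5 is set in the
+ * bitmap sent for group 1.
+ */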
+
+static int hclge_mac_pause_setup_hw(struct hclge_dev *hdev)
+{
+ bool tx_en, rx_en;
+
+ switch (hdev->tm_info.fc_mode) {
+ case HCLGE_FC_NONE:
+ tx_en = false;
+ rx_en = false;
+ break;
+ case HCLGE_FC_RX_PAUSE:
+ tx_en = false;
+ rx_en = true;
+ break;
+ case HCLGE_FC_TX_PAUSE:
+ tx_en = true;
+ rx_en = false;
+ break;
+ case HCLGE_FC_FULL:
+ tx_en = true;
+ rx_en = true;
+ break;
+ case HCLGE_FC_PFC:
+ tx_en = false;
+ rx_en = false;
+ break;
+ default:
+ tx_en = true;
+ rx_en = true;
+ }
+
+ return hclge_mac_pause_en_cfg(hdev, tx_en, rx_en);
+}
+
+static int hclge_tm_bp_setup(struct hclge_dev *hdev)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++) {
+ ret = hclge_bp_setup_hw(hdev, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init)
+{
+ int ret;
+
+ ret = hclge_pause_param_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_mac_pause_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ /* Only DCB-supported dev supports qset back pressure and pfc cmd */
+ if (!hnae3_dev_dcb_supported(hdev))
+ return 0;
+
+ /* The GE MAC does not support PFC. When the driver is initializing
+ * and the MAC is in GE mode, ignore the error here; otherwise
+ * initialization will fail.
+ */
+ ret = hclge_pfc_setup_hw(hdev);
+ if (init && ret == -EOPNOTSUPP)
+ dev_warn(&hdev->pdev->dev, "GE MAC does not support pfc\n");
+ else if (ret) {
+ dev_err(&hdev->pdev->dev, "config pfc failed! ret = %d\n",
+ ret);
+ return ret;
+ }
+
+ return hclge_tm_bp_setup(hdev);
+}
+
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc)
+{
+ struct hclge_vport *vport = hdev->vport;
+ struct hnae3_knic_private_info *kinfo;
+ u32 i, k;
+
+ for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
+ hdev->tm_info.prio_tc[i] = prio_tc[i];
+
+ for (k = 0; k < hdev->num_alloc_vport; k++) {
+ kinfo = &vport[k].nic.kinfo;
+ kinfo->prio_tc[i] = prio_tc[i];
+ }
+ }
+}
+
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc)
+{
+ u8 bit_map = 0;
+ u8 i;
+
+ hdev->tm_info.num_tc = num_tc;
+
+ for (i = 0; i < hdev->tm_info.num_tc; i++)
+ bit_map |= BIT(i);
+
+ if (!bit_map) {
+ bit_map = 1;
+ hdev->tm_info.num_tc = 1;
+ }
+
+ hdev->hw_tc_map = bit_map;
+
+ hclge_tm_schd_info_init(hdev);
+}
+
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init)
+{
+ int ret;
+
+ if ((hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE) &&
+ (hdev->tx_sch_mode != HCLGE_FLAG_VNET_BASE_SCH_MODE))
+ return -ENOTSUPP;
+
+ ret = hclge_tm_schd_setup_hw(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclge_pause_setup_hw(hdev, init);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int hclge_tm_schd_init(struct hclge_dev *hdev)
+{
+ /* fc_mode is HCLGE_FC_FULL on reset */
+ hdev->tm_info.fc_mode = HCLGE_FC_FULL;
+ hdev->fc_mode_last_time = hdev->tm_info.fc_mode;
+
+ if (hdev->tx_sch_mode != HCLGE_FLAG_TC_BASE_SCH_MODE &&
+ hdev->tm_info.num_pg != 1)
+ return -EINVAL;
+
+ hclge_tm_schd_info_init(hdev);
+
+ return hclge_tm_init_hw(hdev, true);
+}
+
+int hclge_tm_vport_map_update(struct hclge_dev *hdev)
+{
+ struct hclge_vport *vport = hdev->vport;
+ int ret;
+
+ hclge_tm_vport_tc_info_update(vport);
+
+ ret = hclge_vport_q_to_qs_map(hdev, vport);
+ if (ret)
+ return ret;
+
+ if (hdev->tm_info.num_tc == 1 && !hdev->tm_info.pfc_en)
+ return 0;
+
+ return hclge_tm_bp_setup(hdev);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
new file mode 100644
index 000000000..42932c879
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#ifndef __HCLGE_TM_H
+#define __HCLGE_TM_H
+
+#include <linux/types.h>
+
+/* MAC Pause */
+#define HCLGE_TX_MAC_PAUSE_EN_MSK BIT(0)
+#define HCLGE_RX_MAC_PAUSE_EN_MSK BIT(1)
+
+#define HCLGE_TM_PORT_BASE_MODE_MSK BIT(0)
+
+#define HCLGE_DEFAULT_PAUSE_TRANS_GAP 0x7F
+#define HCLGE_DEFAULT_PAUSE_TRANS_TIME 0xFFFF
+
+/* SP or DWRR */
+#define HCLGE_TM_TX_SCHD_DWRR_MSK BIT(0)
+#define HCLGE_TM_TX_SCHD_SP_MSK (0xFE)
+
+#define HCLGE_ETHER_MAX_RATE 100000
+
+struct hclge_pg_to_pri_link_cmd {
+ u8 pg_id;
+ u8 rsvd1[3];
+ u8 pri_bit_map;
+};
+
+struct hclge_qs_to_pri_link_cmd {
+ __le16 qs_id;
+ __le16 rsvd;
+ u8 priority;
+#define HCLGE_TM_QS_PRI_LINK_VLD_MSK BIT(0)
+ u8 link_vld;
+};
+
+struct hclge_nq_to_qs_link_cmd {
+ __le16 nq_id;
+ __le16 rsvd;
+#define HCLGE_TM_Q_QS_LINK_VLD_MSK BIT(10)
+ __le16 qset_id;
+};
+
+struct hclge_tqp_tx_queue_tc_cmd {
+ __le16 queue_id;
+ __le16 rsvd;
+ u8 tc_id;
+ u8 rev[3];
+};
+
+struct hclge_pg_weight_cmd {
+ u8 pg_id;
+ u8 dwrr;
+};
+
+struct hclge_priority_weight_cmd {
+ u8 pri_id;
+ u8 dwrr;
+};
+
+struct hclge_qs_weight_cmd {
+ __le16 qs_id;
+ u8 dwrr;
+};
+
+struct hclge_ets_tc_weight_cmd {
+ u8 tc_weight[HNAE3_MAX_TC];
+ u8 weight_offset;
+ u8 rsvd[15];
+};
+
+#define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0)
+#define HCLGE_TM_SHAP_IR_B_LSH 0
+#define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8)
+#define HCLGE_TM_SHAP_IR_U_LSH 8
+#define HCLGE_TM_SHAP_IR_S_MSK GENMASK(15, 12)
+#define HCLGE_TM_SHAP_IR_S_LSH 12
+#define HCLGE_TM_SHAP_BS_B_MSK GENMASK(20, 16)
+#define HCLGE_TM_SHAP_BS_B_LSH 16
+#define HCLGE_TM_SHAP_BS_S_MSK GENMASK(25, 21)
+#define HCLGE_TM_SHAP_BS_S_LSH 21
+
+enum hclge_shap_bucket {
+ HCLGE_TM_SHAP_C_BUCKET = 0,
+ HCLGE_TM_SHAP_P_BUCKET,
+};
+
+struct hclge_pri_shapping_cmd {
+ u8 pri_id;
+ u8 rsvd[3];
+ __le32 pri_shapping_para;
+};
+
+struct hclge_pg_shapping_cmd {
+ u8 pg_id;
+ u8 rsvd[3];
+ __le32 pg_shapping_para;
+};
+
+struct hclge_qs_shapping_cmd {
+ __le16 qs_id;
+ u8 rsvd[2];
+ __le32 qs_shapping_para;
+};
+
+#define HCLGE_BP_GRP_NUM 32
+#define HCLGE_BP_SUB_GRP_ID_S 0
+#define HCLGE_BP_SUB_GRP_ID_M GENMASK(4, 0)
+#define HCLGE_BP_GRP_ID_S 5
+#define HCLGE_BP_GRP_ID_M GENMASK(9, 5)
+struct hclge_bp_to_qs_map_cmd {
+ u8 tc_id;
+ u8 rsvd[2];
+ u8 qs_group_id;
+ __le32 qs_bit_map;
+ u32 rsvd1;
+};
+
+#define HCLGE_PFC_DISABLE 0
+#define HCLGE_PFC_TX_RX_DISABLE 0
+
+struct hclge_pfc_en_cmd {
+ u8 tx_rx_en_bitmap;
+ u8 pri_en_bitmap;
+};
+
+struct hclge_cfg_pause_param_cmd {
+ u8 mac_addr[ETH_ALEN];
+ u8 pause_trans_gap;
+ u8 rsvd;
+ __le16 pause_trans_time;
+ u8 rsvd1[6];
+ /* extra mac address to do double check for pause frame */
+ u8 mac_addr_extra[ETH_ALEN];
+ u16 rsvd2;
+};
+
+struct hclge_pfc_stats_cmd {
+ __le64 pkt_num[3];
+};
+
+struct hclge_port_shapping_cmd {
+ __le32 port_shapping_para;
+};
+
+struct hclge_shaper_ir_para {
+ u8 ir_b; /* IR_B parameter of IR shaper */
+ u8 ir_u; /* IR_U parameter of IR shaper */
+ u8 ir_s; /* IR_S parameter of IR shaper */
+};
+
+#define hclge_tm_set_field(dest, string, val) \
+ hnae3_set_field((dest), \
+ (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH), val)
+#define hclge_tm_get_field(src, string) \
+ hnae3_get_field((src), (HCLGE_TM_SHAP_##string##_MSK), \
+ (HCLGE_TM_SHAP_##string##_LSH))
+
+int hclge_tm_schd_init(struct hclge_dev *hdev);
+int hclge_tm_vport_map_update(struct hclge_dev *hdev);
+int hclge_pause_setup_hw(struct hclge_dev *hdev, bool init);
+int hclge_tm_schd_setup_hw(struct hclge_dev *hdev);
+void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc);
+void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc);
+void hclge_tm_pfc_info_update(struct hclge_dev *hdev);
+int hclge_tm_dwrr_cfg(struct hclge_dev *hdev);
+int hclge_tm_init_hw(struct hclge_dev *hdev, bool init);
+int hclge_pfc_pause_en_cfg(struct hclge_dev *hdev, u8 tx_rx_bitmap,
+ u8 pfc_bitmap);
+int hclge_mac_pause_en_cfg(struct hclge_dev *hdev, bool tx, bool rx);
+int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr);
+int hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats);
+int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate);
+
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
new file mode 100644
index 000000000..5b0b71bd6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2020 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGE_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define PF_GET_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+#define PF_SEND_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_pf_mbx_get,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, PF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_pf_mbx_send,
+ TP_PROTO(
+ struct hclge_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->vport[0].nic.kinfo.netdev->name)
+ __array(u32, mbx_data, PF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->vport[0].nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, PF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGE_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGE_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclge_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
new file mode 100644
index 000000000..2c26ea607
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0+
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3
+ccflags-y += -I $(srctree)/$(src)
+
+obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o
+hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
new file mode 100644
index 000000000..cae6db17c
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/device.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
+
+static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
+{
+ int ntc = ring->next_to_clean;
+ int ntu = ring->next_to_use;
+ int used;
+
+ used = (ntu - ntc + ring->desc_num) % ring->desc_num;
+
+ return ring->desc_num - used - 1;
+}
+
+static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
+ int head)
+{
+ int ntu = ring->next_to_use;
+ int ntc = ring->next_to_clean;
+
+ if (ntu > ntc)
+ return head >= ntc && head <= ntu;
+
+ return head >= ntc || head <= ntu;
+}
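+
+/* For example, with a (hypothetical) desc_num of 1024, next_to_use = 10
+ * and next_to_clean = 4 give used = 6 and 1017 free slots, and a hardware
+ * head anywhere in [4, 10] is considered valid. When the ring has wrapped
+ * (e.g. next_to_use = 2, next_to_clean = 1000), a valid head is either
+ * >= 1000 or <= 2.
+ */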
+
+static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
+{
+ struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+ int clean;
+ u32 head;
+
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+ rmb(); /* Make sure head is ready before touching any data */
+
+ if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
+ dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
+ csq->next_to_use, csq->next_to_clean);
+ dev_warn(&hdev->pdev->dev,
+ "Disabling any further commands to IMP firmware\n");
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ return -EIO;
+ }
+
+ clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
+ csq->next_to_clean = head;
+ return clean;
+}
+
+static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
+{
+ u32 head;
+
+ head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
+
+ return head == hw->cmq.csq.next_to_use;
+}
+
+static bool hclgevf_is_special_opcode(u16 opcode)
+{
+ static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
+ if (spec_opcode[i] == opcode)
+ return true;
+ }
+
+ return false;
+}
+
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+ struct hclgevf_dev *hdev = ring->dev;
+ struct hclgevf_hw *hw = &hdev->hw;
+ u32 reg_val;
+
+ if (ring->flag == HCLGEVF_TYPE_CSQ) {
+ reg_val = lower_32_bits(ring->desc_dma_addr);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ reg_val &= HCLGEVF_NIC_SW_RST_RDY;
+ reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ } else {
+ reg_val = lower_32_bits(ring->desc_dma_addr);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+ reg_val = upper_32_bits(ring->desc_dma_addr);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+ reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+ }
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_cmd_config_regs(&hw->cmq.csq);
+ hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
+static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+ ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
+ &ring->desc_dma_addr, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
+{
+ int size = ring->desc_num * sizeof(struct hclgevf_desc);
+
+ if (ring->desc) {
+ dma_free_coherent(cmq_ring_to_dev(ring), size,
+ ring->desc, ring->desc_dma_addr);
+ ring->desc = NULL;
+ }
+}
+
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
+{
+ struct hclgevf_hw *hw = &hdev->hw;
+ struct hclgevf_cmq_ring *ring =
+ (ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
+ int ret;
+
+ ring->dev = hdev;
+ ring->flag = ring_type;
+
+ /* allocate CSQ/CRQ descriptor */
+ ret = hclgevf_alloc_cmd_desc(ring);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
+ (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
+
+ return ret;
+}
+
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode, bool is_read)
+{
+ memset(desc, 0, sizeof(struct hclgevf_desc));
+ desc->opcode = cpu_to_le16(opcode);
+ desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
+ HCLGEVF_CMD_FLAG_IN);
+ if (is_read)
+ desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
+ else
+ desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
+}
+
+static int hclgevf_cmd_convert_err_code(u16 desc_ret)
+{
+ switch (desc_ret) {
+ case HCLGEVF_CMD_EXEC_SUCCESS:
+ return 0;
+ case HCLGEVF_CMD_NO_AUTH:
+ return -EPERM;
+ case HCLGEVF_CMD_NOT_SUPPORTED:
+ return -EOPNOTSUPP;
+ case HCLGEVF_CMD_QUEUE_FULL:
+ return -EXFULL;
+ case HCLGEVF_CMD_NEXT_ERR:
+ return -ENOSR;
+ case HCLGEVF_CMD_UNEXE_ERR:
+ return -ENOTBLK;
+ case HCLGEVF_CMD_PARA_ERR:
+ return -EINVAL;
+ case HCLGEVF_CMD_RESULT_ERR:
+ return -ERANGE;
+ case HCLGEVF_CMD_TIMEOUT:
+ return -ETIME;
+ case HCLGEVF_CMD_HILINK_ERR:
+ return -ENOLINK;
+ case HCLGEVF_CMD_QUEUE_ILLEGAL:
+ return -ENXIO;
+ case HCLGEVF_CMD_INVALID:
+ return -EBADR;
+ default:
+ return -EIO;
+ }
+}
+
+/* hclgevf_cmd_send - send command to command queue
+ * @hw: pointer to the hw struct
+ * @desc: prefilled descriptor for describing the command
+ * @num: the number of descriptors to be sent
+ *
+ * This is the main send routine for the command queue: it posts the
+ * descriptors, waits for the hardware write-back and cleans the queue
+ */
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
+{
+ struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+ struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
+ struct hclgevf_desc *desc_to_use;
+ bool complete = false;
+ u32 timeout = 0;
+ int handle = 0;
+ int status = 0;
+ u16 retval;
+ u16 opcode;
+ int ntc;
+
+ spin_lock_bh(&hw->cmq.csq.lock);
+
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ if (num > hclgevf_ring_space(&hw->cmq.csq)) {
+ /* If the CMDQ ring is full, the SW head and HW head may differ,
+ * so update the SW head pointer csq->next_to_clean
+ */
+ csq->next_to_clean = hclgevf_read_dev(hw,
+ HCLGEVF_NIC_CSQ_HEAD_REG);
+ spin_unlock_bh(&hw->cmq.csq.lock);
+ return -EBUSY;
+ }
+
+ /* Record the location of the descriptors in the ring for this
+ * submission; hardware writes its results back to these slots
+ */
+ ntc = hw->cmq.csq.next_to_use;
+ opcode = le16_to_cpu(desc[0].opcode);
+ while (handle < num) {
+ desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+ *desc_to_use = desc[handle];
+ (hw->cmq.csq.next_to_use)++;
+ if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+ hw->cmq.csq.next_to_use = 0;
+ handle++;
+ }
+
+ /* Write to hardware */
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
+ hw->cmq.csq.next_to_use);
+
+ /* If the command is synchronous, wait for the firmware write-back;
+ * if multiple descriptors are sent, use the first one to check
+ */
+ if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+ do {
+ if (hclgevf_cmd_csq_done(hw))
+ break;
+ udelay(1);
+ timeout++;
+ } while (timeout < hw->cmq.tx_timeout);
+ }
+
+ if (hclgevf_cmd_csq_done(hw)) {
+ complete = true;
+ handle = 0;
+
+ while (handle < num) {
+ /* Get the result of hardware write back */
+ desc_to_use = &hw->cmq.csq.desc[ntc];
+ desc[handle] = *desc_to_use;
+
+ if (likely(!hclgevf_is_special_opcode(opcode)))
+ retval = le16_to_cpu(desc[handle].retval);
+ else
+ retval = le16_to_cpu(desc[0].retval);
+
+ status = hclgevf_cmd_convert_err_code(retval);
+ hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
+ ntc++;
+ handle++;
+ if (ntc == hw->cmq.csq.desc_num)
+ ntc = 0;
+ }
+ }
+
+ if (!complete)
+ status = -EBADE;
+
+ /* Clean the command send queue */
+ handle = hclgevf_cmd_csq_clean(hw);
+ if (handle != num)
+ dev_warn(&hdev->pdev->dev,
+ "cleaned %d, need to clean %d\n", handle, num);
+
+ spin_unlock_bh(&hw->cmq.csq.lock);
+
+ return status;
+}
+
+static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
+ set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
+}
+
+static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
+ struct hclgevf_query_version_cmd *cmd)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ u32 caps;
+
+ caps = __le32_to_cpu(cmd->caps[0]);
+
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
+ set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
+ set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
+ if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
+ set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
+}
+
+static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclgevf_query_version_cmd *resp;
+ struct hclgevf_desc desc;
+ int status;
+
+ resp = (struct hclgevf_query_version_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ return status;
+
+ hdev->fw_version = le32_to_cpu(resp->firmware);
+
+ ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
+ HNAE3_PCI_REVISION_BIT_SIZE;
+ ae_dev->dev_version |= hdev->pdev->revision;
+
+ if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ hclgevf_set_default_capability(hdev);
+
+ hclgevf_parse_capability(hdev, resp);
+
+ return status;
+}
+
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+	/* Set up the locks for the command queues */
+ spin_lock_init(&hdev->hw.cmq.csq.lock);
+ spin_lock_init(&hdev->hw.cmq.crq.lock);
+
+ hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+ hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+ hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "CSQ ring setup error %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "CRQ ring setup error %d\n", ret);
+ goto err_csq;
+ }
+
+ return 0;
+err_csq:
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+ int ret;
+
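+	/* hold both the CSQ and CRQ locks while the ring pointers and the
+	 * command queue registers are re-initialized
+	 */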
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
+
+ /* initialize the pointers of async rx queue of mailbox */
+ hdev->arq.hdev = hdev;
+ hdev->arq.head = 0;
+ hdev->arq.tail = 0;
+ atomic_set(&hdev->arq.count, 0);
+ hdev->hw.cmq.csq.next_to_clean = 0;
+ hdev->hw.cmq.csq.next_to_use = 0;
+ hdev->hw.cmq.crq.next_to_clean = 0;
+ hdev->hw.cmq.crq.next_to_use = 0;
+
+ hclgevf_cmd_init_regs(&hdev->hw);
+
+ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+	/* Check if there is a new reset pending, because a higher level
+	 * reset may happen while a lower level reset is being processed.
+	 */
+ if (hclgevf_is_reset_pending(hdev)) {
+ ret = -EBUSY;
+ goto err_cmd_init;
+ }
+
+ /* get version and device capabilities */
+ ret = hclgevf_cmd_query_version_and_capability(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to query version and capabilities, ret = %d\n", ret);
+ goto err_cmd_init;
+ }
+
+ dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
+ HNAE3_FW_VERSION_BYTE3_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
+ HNAE3_FW_VERSION_BYTE2_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
+ HNAE3_FW_VERSION_BYTE1_SHIFT),
+ hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
+ HNAE3_FW_VERSION_BYTE0_SHIFT));
+
+ return 0;
+
+err_cmd_init:
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+
+ return ret;
+}
+
+static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
+{
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+ hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+}
+
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+	/* wait to ensure that the firmware completes any possible leftover
+	 * commands.
+	 */
+ msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME);
+ spin_lock_bh(&hdev->hw.cmq.csq.lock);
+ spin_lock(&hdev->hw.cmq.crq.lock);
+ hclgevf_cmd_uninit_regs(&hdev->hw);
+ spin_unlock(&hdev->hw.cmq.crq.lock);
+ spin_unlock_bh(&hdev->hw.cmq.csq.lock);
+
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+ hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
new file mode 100644
index 000000000..f90ff8a84
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h
@@ -0,0 +1,317 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_CMD_H
+#define __HCLGEVF_CMD_H
+#include <linux/io.h>
+#include <linux/types.h>
+#include "hnae3.h"
+
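+/* consumed as 1us polling steps in hclgevf_cmd_send(), so the TX timeout
+ * below corresponds to roughly 30ms of busy-waiting
+ */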
+#define HCLGEVF_CMDQ_TX_TIMEOUT 30000
+#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200
+#define HCLGEVF_CMDQ_RX_INVLD_B 0
+#define HCLGEVF_CMDQ_RX_OUTVLD_B 1
+
+struct hclgevf_hw;
+struct hclgevf_dev;
+
+struct hclgevf_desc {
+ __le16 opcode;
+ __le16 flag;
+ __le16 retval;
+ __le16 rsv;
+ __le32 data[6];
+};
+
+struct hclgevf_desc_cb {
+ dma_addr_t dma;
+ void *va;
+ u32 length;
+};
+
+struct hclgevf_cmq_ring {
+ dma_addr_t desc_dma_addr;
+ struct hclgevf_desc *desc;
+ struct hclgevf_desc_cb *desc_cb;
+ struct hclgevf_dev *dev;
+ u32 head;
+ u32 tail;
+
+ u16 buf_size;
+ u16 desc_num;
+ int next_to_use;
+ int next_to_clean;
+ u8 flag;
+ spinlock_t lock; /* Command queue lock */
+};
+
+enum hclgevf_cmd_return_status {
+ HCLGEVF_CMD_EXEC_SUCCESS = 0,
+ HCLGEVF_CMD_NO_AUTH = 1,
+ HCLGEVF_CMD_NOT_SUPPORTED = 2,
+ HCLGEVF_CMD_QUEUE_FULL = 3,
+ HCLGEVF_CMD_NEXT_ERR = 4,
+ HCLGEVF_CMD_UNEXE_ERR = 5,
+ HCLGEVF_CMD_PARA_ERR = 6,
+ HCLGEVF_CMD_RESULT_ERR = 7,
+ HCLGEVF_CMD_TIMEOUT = 8,
+ HCLGEVF_CMD_HILINK_ERR = 9,
+ HCLGEVF_CMD_QUEUE_ILLEGAL = 10,
+ HCLGEVF_CMD_INVALID = 11,
+};
+
+enum hclgevf_cmd_status {
+ HCLGEVF_STATUS_SUCCESS = 0,
+ HCLGEVF_ERR_CSQ_FULL = -1,
+ HCLGEVF_ERR_CSQ_TIMEOUT = -2,
+ HCLGEVF_ERR_CSQ_ERROR = -3
+};
+
+struct hclgevf_cmq {
+ struct hclgevf_cmq_ring csq;
+ struct hclgevf_cmq_ring crq;
+ u16 tx_timeout; /* Tx timeout */
+ enum hclgevf_cmd_status last_status;
+};
+
+#define HCLGEVF_CMD_FLAG_IN_VALID_SHIFT 0
+#define HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT 1
+#define HCLGEVF_CMD_FLAG_NEXT_SHIFT 2
+#define HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT 3
+#define HCLGEVF_CMD_FLAG_NO_INTR_SHIFT 4
+#define HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT 5
+
+#define HCLGEVF_CMD_FLAG_IN BIT(HCLGEVF_CMD_FLAG_IN_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_OUT BIT(HCLGEVF_CMD_FLAG_OUT_VALID_SHIFT)
+#define HCLGEVF_CMD_FLAG_NEXT BIT(HCLGEVF_CMD_FLAG_NEXT_SHIFT)
+#define HCLGEVF_CMD_FLAG_WR BIT(HCLGEVF_CMD_FLAG_WR_OR_RD_SHIFT)
+#define HCLGEVF_CMD_FLAG_NO_INTR BIT(HCLGEVF_CMD_FLAG_NO_INTR_SHIFT)
+#define HCLGEVF_CMD_FLAG_ERR_INTR BIT(HCLGEVF_CMD_FLAG_ERR_INTR_SHIFT)
+
+enum hclgevf_opcode_type {
+ /* Generic command */
+ HCLGEVF_OPC_QUERY_FW_VER = 0x0001,
+ HCLGEVF_OPC_QUERY_VF_RSRC = 0x0024,
+ HCLGEVF_OPC_QUERY_DEV_SPECS = 0x0050,
+
+ /* TQP command */
+ HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
+ HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
+ HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
+ /* GRO command */
+ HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
+ /* RSS cmd */
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
+ HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
+ HCLGEVF_OPC_RSS_INDIR_TABLE = 0x0D07,
+ HCLGEVF_OPC_RSS_TC_MODE = 0x0D08,
+ /* Mailbox cmd */
+ HCLGEVF_OPC_MBX_VF_TO_PF = 0x2001,
+};
+
+#define HCLGEVF_TQP_REG_OFFSET 0x80000
+#define HCLGEVF_TQP_REG_SIZE 0x200
+
+struct hclgevf_tqp_map {
+	__le16 tqp_id;	/* Absolute tqp id in this pf */
+ u8 tqp_vf; /* VF id */
+#define HCLGEVF_TQP_MAP_TYPE_PF 0
+#define HCLGEVF_TQP_MAP_TYPE_VF 1
+#define HCLGEVF_TQP_MAP_TYPE_B 0
+#define HCLGEVF_TQP_MAP_EN_B 1
+ u8 tqp_flag; /* Indicate it's pf or vf tqp */
+ __le16 tqp_vid; /* Virtual id in this pf/vf */
+ u8 rsv[18];
+};
+
+#define HCLGEVF_VECTOR_ELEMENTS_PER_CMD 10
+
+enum hclgevf_int_type {
+ HCLGEVF_INT_TX = 0,
+ HCLGEVF_INT_RX,
+ HCLGEVF_INT_EVENT,
+};
+
+struct hclgevf_ctrl_vector_chain {
+ u8 int_vector_id;
+ u8 int_cause_num;
+#define HCLGEVF_INT_TYPE_S 0
+#define HCLGEVF_INT_TYPE_M 0x3
+#define HCLGEVF_TQP_ID_S 2
+#define HCLGEVF_TQP_ID_M (0x3fff << HCLGEVF_TQP_ID_S)
+ __le16 tqp_type_and_id[HCLGEVF_VECTOR_ELEMENTS_PER_CMD];
+ u8 vfid;
+ u8 resv;
+};
+
+enum HCLGEVF_CAP_BITS {
+ HCLGEVF_CAP_UDP_GSO_B,
+ HCLGEVF_CAP_QB_B,
+ HCLGEVF_CAP_FD_FORWARD_TC_B,
+ HCLGEVF_CAP_PTP_B,
+ HCLGEVF_CAP_INT_QL_B,
+ HCLGEVF_CAP_SIMPLE_BD_B,
+ HCLGEVF_CAP_TX_PUSH_B,
+ HCLGEVF_CAP_PHY_IMP_B,
+ HCLGEVF_CAP_TQP_TXRX_INDEP_B,
+ HCLGEVF_CAP_HW_PAD_B,
+ HCLGEVF_CAP_STASH_B,
+};
+
+#define HCLGEVF_QUERY_CAP_LENGTH 3
+struct hclgevf_query_version_cmd {
+ __le32 firmware;
+ __le32 hardware;
+ __le32 rsv;
+ __le32 caps[HCLGEVF_QUERY_CAP_LENGTH]; /* capabilities of device */
+};
+
+#define HCLGEVF_MSIX_OFT_ROCEE_S 0
+#define HCLGEVF_MSIX_OFT_ROCEE_M (0xffff << HCLGEVF_MSIX_OFT_ROCEE_S)
+#define HCLGEVF_VEC_NUM_S 0
+#define HCLGEVF_VEC_NUM_M (0xff << HCLGEVF_VEC_NUM_S)
+struct hclgevf_query_res_cmd {
+ __le16 tqp_num;
+ __le16 reserved;
+ __le16 msixcap_localid_ba_nic;
+ __le16 msixcap_localid_ba_rocee;
+ __le16 vf_intr_vector_number;
+ __le16 rsv[7];
+};
+
+#define HCLGEVF_GRO_EN_B 0
+struct hclgevf_cfg_gro_status_cmd {
+ u8 gro_en;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
+#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
+#define HCLGEVF_RSS_HASH_KEY_NUM 16
+struct hclgevf_rss_config_cmd {
+ u8 hash_config;
+ u8 rsv[7];
+ u8 hash_key[HCLGEVF_RSS_HASH_KEY_NUM];
+};
+
+struct hclgevf_rss_input_tuple_cmd {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+ u8 rsv[16];
+};
+
+#define HCLGEVF_RSS_CFG_TBL_SIZE 16
+
+struct hclgevf_rss_indirection_table_cmd {
+ u16 start_table_index;
+ u16 rss_set_bitmap;
+ u8 rsv[4];
+ u8 rss_result[HCLGEVF_RSS_CFG_TBL_SIZE];
+};
+
+#define HCLGEVF_RSS_TC_OFFSET_S 0
+#define HCLGEVF_RSS_TC_OFFSET_M (0x3ff << HCLGEVF_RSS_TC_OFFSET_S)
+#define HCLGEVF_RSS_TC_SIZE_S 12
+#define HCLGEVF_RSS_TC_SIZE_M (0x7 << HCLGEVF_RSS_TC_SIZE_S)
+#define HCLGEVF_RSS_TC_VALID_B 15
+#define HCLGEVF_MAX_TC_NUM 8
+struct hclgevf_rss_tc_mode_cmd {
+ u16 rss_tc_mode[HCLGEVF_MAX_TC_NUM];
+ u8 rsv[8];
+};
+
+#define HCLGEVF_LINK_STS_B 0
+#define HCLGEVF_LINK_STATUS BIT(HCLGEVF_LINK_STS_B)
+struct hclgevf_link_status_cmd {
+ u8 status;
+ u8 rsv[23];
+};
+
+#define HCLGEVF_RING_ID_MASK 0x3ff
+#define HCLGEVF_TQP_ENABLE_B 0
+
+struct hclgevf_cfg_com_tqp_queue_cmd {
+ __le16 tqp_id;
+ __le16 stream_id;
+ u8 enable;
+ u8 rsv[19];
+};
+
+struct hclgevf_cfg_tx_queue_pointer_cmd {
+ __le16 tqp_id;
+ __le16 tx_tail;
+ __le16 tx_head;
+ __le16 fbd_num;
+ __le16 ring_offset;
+ u8 rsv[14];
+};
+
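+/* the CSQ (command send queue) carries driver-to-firmware descriptors, while
+ * the CRQ (command receive queue) carries events coming back from the
+ * firmware, such as mailbox messages forwarded by the PF
+ */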
+#define HCLGEVF_TYPE_CRQ 0
+#define HCLGEVF_TYPE_CSQ 1
+#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000
+#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004
+#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008
+#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010
+#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014
+#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018
+#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c
+#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020
+#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024
+#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028
+
+/* this bit indicates that the driver is ready for hardware reset */
+#define HCLGEVF_NIC_SW_RST_RDY_B 16
+#define HCLGEVF_NIC_SW_RST_RDY BIT(HCLGEVF_NIC_SW_RST_RDY_B)
+
+#define HCLGEVF_NIC_CMQ_DESC_NUM 1024
+#define HCLGEVF_NIC_CMQ_DESC_NUM_S 3
+#define HCLGEVF_NIC_CMDQ_INT_SRC_REG 0x27100
+
+#define HCLGEVF_QUERY_DEV_SPECS_BD_NUM 4
+
+struct hclgevf_dev_specs_0_cmd {
+ __le32 rsv0;
+ __le32 mac_entry_num;
+ __le32 mng_entry_num;
+ __le16 rss_ind_tbl_size;
+ __le16 rss_key_size;
+ __le16 int_ql_max;
+ u8 max_non_tso_bd_num;
+ u8 rsv1[5];
+};
+
+static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value)
+{
+ writel(value, base + reg);
+}
+
+static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
+{
+ u8 __iomem *reg_addr = READ_ONCE(base);
+
+ return readl(reg_addr + reg);
+}
+
+#define hclgevf_write_dev(a, reg, value) \
+ hclgevf_write_reg((a)->io_base, (reg), (value))
+#define hclgevf_read_dev(a, reg) \
+ hclgevf_read_reg((a)->io_base, (reg))
+
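+/* a descriptor sent with the NO_INTR flag set is handled synchronously:
+ * hclgevf_cmd_send() busy-polls the CSQ head register until the firmware
+ * has consumed it or cmq.tx_timeout expires
+ */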
+#define HCLGEVF_SEND_SYNC(flag) \
+ ((flag) & HCLGEVF_CMD_FLAG_NO_INTR)
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev);
+void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
+
+int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
+void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
+ enum hclgevf_opcode_type opcode,
+ bool is_read);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
new file mode 100644
index 000000000..2bb0ce176
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
@@ -0,0 +1,3739 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include <linux/etherdevice.h>
+#include <linux/iopoll.h>
+#include <net/rtnetlink.h>
+#include "hclgevf_cmd.h"
+#include "hclgevf_main.h"
+#include "hclge_mbx.h"
+#include "hnae3.h"
+
+#define HCLGEVF_NAME "hclgevf"
+
+#define HCLGEVF_RESET_MAX_FAIL_CNT 5
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev);
+static struct hnae3_ae_algo ae_algovf;
+
+static struct workqueue_struct *hclgevf_wq;
+
+static const struct pci_device_id ae_algovf_pci_tbl[] = {
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_VF), 0},
+ {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF),
+ HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
+ /* required last entry */
+ {0, }
+};
+
+static const u8 hclgevf_hash_key[] = {
+ 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
+ 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
+ 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
+ 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
+ 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA
+};
+
+MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl);
+
+static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG,
+ HCLGEVF_CMDQ_TX_ADDR_H_REG,
+ HCLGEVF_CMDQ_TX_DEPTH_REG,
+ HCLGEVF_CMDQ_TX_TAIL_REG,
+ HCLGEVF_CMDQ_TX_HEAD_REG,
+ HCLGEVF_CMDQ_RX_ADDR_L_REG,
+ HCLGEVF_CMDQ_RX_ADDR_H_REG,
+ HCLGEVF_CMDQ_RX_DEPTH_REG,
+ HCLGEVF_CMDQ_RX_TAIL_REG,
+ HCLGEVF_CMDQ_RX_HEAD_REG,
+ HCLGEVF_VECTOR0_CMDQ_SRC_REG,
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG,
+ HCLGEVF_CMDQ_INTR_EN_REG,
+ HCLGEVF_CMDQ_INTR_GEN_REG};
+
+static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE,
+ HCLGEVF_RST_ING,
+ HCLGEVF_GRO_EN_REG};
+
+static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG,
+ HCLGEVF_RING_RX_ADDR_H_REG,
+ HCLGEVF_RING_RX_BD_NUM_REG,
+ HCLGEVF_RING_RX_BD_LENGTH_REG,
+ HCLGEVF_RING_RX_MERGE_EN_REG,
+ HCLGEVF_RING_RX_TAIL_REG,
+ HCLGEVF_RING_RX_HEAD_REG,
+ HCLGEVF_RING_RX_FBD_NUM_REG,
+ HCLGEVF_RING_RX_OFFSET_REG,
+ HCLGEVF_RING_RX_FBD_OFFSET_REG,
+ HCLGEVF_RING_RX_STASH_REG,
+ HCLGEVF_RING_RX_BD_ERR_REG,
+ HCLGEVF_RING_TX_ADDR_L_REG,
+ HCLGEVF_RING_TX_ADDR_H_REG,
+ HCLGEVF_RING_TX_BD_NUM_REG,
+ HCLGEVF_RING_TX_PRIORITY_REG,
+ HCLGEVF_RING_TX_TC_REG,
+ HCLGEVF_RING_TX_MERGE_EN_REG,
+ HCLGEVF_RING_TX_TAIL_REG,
+ HCLGEVF_RING_TX_HEAD_REG,
+ HCLGEVF_RING_TX_FBD_NUM_REG,
+ HCLGEVF_RING_TX_OFFSET_REG,
+ HCLGEVF_RING_TX_EBD_NUM_REG,
+ HCLGEVF_RING_TX_EBD_OFFSET_REG,
+ HCLGEVF_RING_TX_BD_ERR_REG,
+ HCLGEVF_RING_EN_REG};
+
+static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG,
+ HCLGEVF_TQP_INTR_GL0_REG,
+ HCLGEVF_TQP_INTR_GL1_REG,
+ HCLGEVF_TQP_INTR_GL2_REG,
+ HCLGEVF_TQP_INTR_RL_REG};
+
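+/* one hclgevf_dev backs both the NIC and the RoCE hnae3 handles, so work out
+ * which member the handle is embedded in before using container_of()
+ */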
+static struct hclgevf_dev *hclgevf_ae_get_hdev(struct hnae3_handle *handle)
+{
+ if (!handle->client)
+ return container_of(handle, struct hclgevf_dev, nic);
+ else if (handle->client->type == HNAE3_CLIENT_ROCE)
+ return container_of(handle, struct hclgevf_dev, roce);
+ else
+ return container_of(handle, struct hclgevf_dev, nic);
+}
+
+static int hclgevf_tqps_update_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_desc desc;
+ struct hclgevf_tqp *tqp;
+ int status;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_QUERY_RX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d, queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_rx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_TX_STATUS,
+ true);
+
+ desc.data[0] = cpu_to_le32(tqp->index & 0x1ff);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+				"Query tqp stat fail, status = %d, queue = %d\n",
+ status, i);
+ return status;
+ }
+ tqp->tqp_stats.rcb_tx_ring_pktnum_rcd +=
+ le32_to_cpu(desc.data[1]);
+ }
+
+ return 0;
+}
+
+static u64 *hclgevf_tqps_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_tqp *tqp;
+ u64 *buff = data;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_tx_ring_pktnum_rcd;
+ }
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
+ *buff++ = tqp->tqp_stats.rcb_rx_ring_pktnum_rcd;
+ }
+
+ return buff;
+}
+
+static int hclgevf_tqps_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+
+ return kinfo->num_tqps * 2;
+}
+
+static u8 *hclgevf_tqps_get_strings(struct hnae3_handle *handle, u8 *data)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u8 *buff = data;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "txq%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ struct hclgevf_tqp *tqp = container_of(kinfo->tqp[i],
+ struct hclgevf_tqp, q);
+ snprintf(buff, ETH_GSTRING_LEN, "rxq%d_pktnum_rcd",
+ tqp->index);
+ buff += ETH_GSTRING_LEN;
+ }
+
+ return buff;
+}
+
+static void hclgevf_update_stats(struct hnae3_handle *handle,
+ struct net_device_stats *net_stats)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int status;
+
+ status = hclgevf_tqps_update_stats(handle);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF update of TQPS stats fail, status = %d.\n",
+ status);
+}
+
+static int hclgevf_get_sset_count(struct hnae3_handle *handle, int strset)
+{
+ if (strset == ETH_SS_TEST)
+ return -EOPNOTSUPP;
+ else if (strset == ETH_SS_STATS)
+ return hclgevf_tqps_get_sset_count(handle, strset);
+
+ return 0;
+}
+
+static void hclgevf_get_strings(struct hnae3_handle *handle, u32 strset,
+ u8 *data)
+{
+	u8 *p = data;
+
+ if (strset == ETH_SS_STATS)
+ p = hclgevf_tqps_get_strings(handle, p);
+}
+
+static void hclgevf_get_stats(struct hnae3_handle *handle, u64 *data)
+{
+ hclgevf_tqps_get_stats(handle, data);
+}
+
+static void hclgevf_build_send_msg(struct hclge_vf_to_pf_msg *msg, u8 code,
+ u8 subcode)
+{
+ if (msg) {
+ memset(msg, 0, sizeof(struct hclge_vf_to_pf_msg));
+ msg->code = code;
+ msg->subcode = subcode;
+ }
+}
+
+static int hclgevf_get_tc_info(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 resp_msg;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_TCINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(resp_msg));
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get TC info from PF failed %d",
+ status);
+ return status;
+ }
+
+ hdev->hw_tc_map = resp_msg;
+
+ return 0;
+}
+
+static int hclgevf_get_port_base_vlan_filter_state(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 resp_msg;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_GET_PORT_BASE_VLAN_STATE);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, &resp_msg,
+ sizeof(u8));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get port based vlan state failed %d",
+ ret);
+ return ret;
+ }
+
+ nic->port_base_vlan_state = resp_msg;
+
+ return 0;
+}
+
+static int hclgevf_get_queue_info(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_RSS_INFO_LEN 6
+#define HCLGEVF_TQPS_ALLOC_OFFSET 0
+#define HCLGEVF_TQPS_RSS_SIZE_OFFSET 2
+#define HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET 4
+
+ u8 resp_msg[HCLGEVF_TQPS_RSS_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QINFO, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ HCLGEVF_TQPS_RSS_INFO_LEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp info from PF failed %d",
+ status);
+ return status;
+ }
+
+ memcpy(&hdev->num_tqps, &resp_msg[HCLGEVF_TQPS_ALLOC_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->rss_size_max, &resp_msg[HCLGEVF_TQPS_RSS_SIZE_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->rx_buf_len, &resp_msg[HCLGEVF_TQPS_RX_BUFFER_LEN_OFFSET],
+ sizeof(u16));
+
+ return 0;
+}
+
+static int hclgevf_get_queue_depth(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_TQPS_DEPTH_INFO_LEN 4
+#define HCLGEVF_TQPS_NUM_TX_DESC_OFFSET 0
+#define HCLGEVF_TQPS_NUM_RX_DESC_OFFSET 2
+
+ u8 resp_msg[HCLGEVF_TQPS_DEPTH_INFO_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QDEPTH, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ HCLGEVF_TQPS_DEPTH_INFO_LEN);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get tqp depth info from PF failed %d",
+ ret);
+ return ret;
+ }
+
+ memcpy(&hdev->num_tx_desc, &resp_msg[HCLGEVF_TQPS_NUM_TX_DESC_OFFSET],
+ sizeof(u16));
+ memcpy(&hdev->num_rx_desc, &resp_msg[HCLGEVF_TQPS_NUM_RX_DESC_OFFSET],
+ sizeof(u16));
+
+ return 0;
+}
+
+static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ u16 qid_in_pf = 0;
+ u8 resp_data[2];
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_QID_IN_PF, 0);
+ memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_data,
+ sizeof(resp_data));
+ if (!ret)
+ qid_in_pf = *(u16 *)resp_data;
+
+ return qid_in_pf;
+}
+
+static int hclgevf_get_pf_media_type(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 resp_msg[2];
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MEDIA_TYPE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ sizeof(resp_msg));
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF request to get the pf port media type failed %d",
+ ret);
+ return ret;
+ }
+
+ hdev->hw.mac.media_type = resp_msg[0];
+ hdev->hw.mac.module_type = resp_msg[1];
+
+ return 0;
+}
+
+static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps,
+ sizeof(struct hclgevf_tqp), GFP_KERNEL);
+ if (!hdev->htqp)
+ return -ENOMEM;
+
+ tqp = hdev->htqp;
+
+ for (i = 0; i < hdev->num_tqps; i++) {
+ tqp->dev = &hdev->pdev->dev;
+ tqp->index = i;
+
+ tqp->q.ae_algo = &ae_algovf;
+ tqp->q.buf_size = hdev->rx_buf_len;
+ tqp->q.tx_desc_num = hdev->num_tx_desc;
+ tqp->q.rx_desc_num = hdev->num_rx_desc;
+ tqp->q.io_base = hdev->hw.io_base + HCLGEVF_TQP_REG_OFFSET +
+ i * HCLGEVF_TQP_REG_SIZE;
+
+ tqp++;
+ }
+
+ return 0;
+}
+
+static int hclgevf_knic_setup(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo;
+ u16 new_tqps = hdev->num_tqps;
+ unsigned int i;
+
+ kinfo = &nic->kinfo;
+ kinfo->num_tc = 0;
+ kinfo->num_tx_desc = hdev->num_tx_desc;
+ kinfo->num_rx_desc = hdev->num_rx_desc;
+ kinfo->rx_buf_len = hdev->rx_buf_len;
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++)
+ if (hdev->hw_tc_map & BIT(i))
+ kinfo->num_tc++;
+
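+	/* rss_size is bounded both by the PF-granted maximum and by an even
+	 * split of the assigned TQPs across the enabled TCs
+	 */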
+ kinfo->rss_size
+ = min_t(u16, hdev->rss_size_max, new_tqps / kinfo->num_tc);
+ new_tqps = kinfo->rss_size * kinfo->num_tc;
+ kinfo->num_tqps = min(new_tqps, hdev->num_tqps);
+
+ kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps,
+ sizeof(struct hnae3_queue *), GFP_KERNEL);
+ if (!kinfo->tqp)
+ return -ENOMEM;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ hdev->htqp[i].q.handle = &hdev->nic;
+ hdev->htqp[i].q.tqp_index = i;
+ kinfo->tqp[i] = &hdev->htqp[i].q;
+ }
+
+	/* after initializing the max rss_size and tqps, adjust the default
+	 * tqp number and rss size according to the actual vector number
+	 */
+ kinfo->num_tqps = min_t(u16, hdev->num_nic_msix - 1, kinfo->num_tqps);
+ kinfo->rss_size = min_t(u16, kinfo->num_tqps / kinfo->num_tc,
+ kinfo->rss_size);
+
+ return 0;
+}
+
+static void hclgevf_request_link_info(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_STATUS, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed to fetch link status(%d) from PF", status);
+}
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state)
+{
+ struct hnae3_handle *rhandle = &hdev->roce;
+ struct hnae3_handle *handle = &hdev->nic;
+ struct hnae3_client *rclient;
+ struct hnae3_client *client;
+
+ if (test_and_set_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state))
+ return;
+
+ client = handle->client;
+ rclient = hdev->roce_client;
+
+ link_state =
+ test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 0 : link_state;
+
+ if (link_state != hdev->hw.mac.link) {
+ client->ops->link_status_change(handle, !!link_state);
+ if (rclient && rclient->ops->link_status_change)
+ rclient->ops->link_status_change(rhandle, !!link_state);
+ hdev->hw.mac.link = link_state;
+ }
+
+ clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state);
+}
+
+static void hclgevf_update_link_mode(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_ADVERTISING 0
+#define HCLGEVF_SUPPORTED 1
+
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_LINK_MODE, 0);
+ send_msg.data[0] = HCLGEVF_ADVERTISING;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ send_msg.data[0] = HCLGEVF_SUPPORTED;
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_set_handle_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ nic->ae_algo = &ae_algovf;
+ nic->pdev = hdev->pdev;
+ nic->numa_node_mask = hdev->numa_node_mask;
+ nic->flags |= HNAE3_SUPPORT_VF;
+
+ ret = hclgevf_knic_setup(hdev);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "VF knic setup failed %d\n",
+ ret);
+ return ret;
+}
+
+static void hclgevf_free_vector(struct hclgevf_dev *hdev, int vector_id)
+{
+ if (hdev->vector_status[vector_id] == HCLGEVF_INVALID_VPORT) {
+ dev_warn(&hdev->pdev->dev,
+ "vector(vector_id %d) has been freed.\n", vector_id);
+ return;
+ }
+
+ hdev->vector_status[vector_id] = HCLGEVF_INVALID_VPORT;
+ hdev->num_msi_left += 1;
+ hdev->num_msi_used -= 1;
+}
+
+static int hclgevf_get_vector(struct hnae3_handle *handle, u16 vector_num,
+ struct hnae3_vector_info *vector_info)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_vector_info *vector = vector_info;
+ int alloc = 0;
+ int i, j;
+
+ vector_num = min_t(u16, hdev->num_nic_msix - 1, vector_num);
+ vector_num = min(hdev->num_msi_left, vector_num);
+
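+	/* indexes 0..HCLGEVF_MISC_VECTOR_NUM are reserved for the misc
+	 * (mailbox/reset) interrupt, so ring vectors are handed out starting
+	 * from the next entry in the MSI-X table
+	 */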
+ for (j = 0; j < vector_num; j++) {
+ for (i = HCLGEVF_MISC_VECTOR_NUM + 1; i < hdev->num_msi; i++) {
+ if (hdev->vector_status[i] == HCLGEVF_INVALID_VPORT) {
+ vector->vector = pci_irq_vector(hdev->pdev, i);
+ vector->io_addr = hdev->hw.io_base +
+ HCLGEVF_VECTOR_REG_BASE +
+ (i - 1) * HCLGEVF_VECTOR_REG_OFFSET;
+ hdev->vector_status[i] = 0;
+ hdev->vector_irq[i] = vector->vector;
+
+ vector++;
+ alloc++;
+
+ break;
+ }
+ }
+ }
+ hdev->num_msi_left -= alloc;
+ hdev->num_msi_used += alloc;
+
+ return alloc;
+}
+
+static int hclgevf_get_vector_index(struct hclgevf_dev *hdev, int vector)
+{
+ int i;
+
+ for (i = 0; i < hdev->num_msi; i++)
+ if (vector == hdev->vector_irq[i])
+ return i;
+
+ return -EINVAL;
+}
+
+static int hclgevf_set_rss_algo_key(struct hclgevf_dev *hdev,
+ const u8 hfunc, const u8 *key)
+{
+ struct hclgevf_rss_config_cmd *req;
+ unsigned int key_offset = 0;
+ struct hclgevf_desc desc;
+ int key_counts;
+ int key_size;
+ int ret;
+
+ key_counts = HCLGEVF_RSS_KEY_SIZE;
+ req = (struct hclgevf_rss_config_cmd *)desc.data;
+
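+	/* the hash key does not fit in a single descriptor, so program it in
+	 * chunks of HCLGEVF_RSS_HASH_KEY_NUM bytes, carrying the chunk index
+	 * in the hash_config offset field
+	 */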
+ while (key_counts) {
+ hclgevf_cmd_setup_basic_desc(&desc,
+ HCLGEVF_OPC_RSS_GENERIC_CONFIG,
+ false);
+
+ req->hash_config |= (hfunc & HCLGEVF_RSS_HASH_ALGO_MASK);
+ req->hash_config |=
+ (key_offset << HCLGEVF_RSS_HASH_KEY_OFFSET_B);
+
+ key_size = min(HCLGEVF_RSS_HASH_KEY_NUM, key_counts);
+ memcpy(req->hash_key,
+ key + key_offset * HCLGEVF_RSS_HASH_KEY_NUM, key_size);
+
+ key_counts -= key_size;
+ key_offset++;
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Configure RSS config fail, status = %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static u32 hclgevf_get_rss_key_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_KEY_SIZE;
+}
+
+static u32 hclgevf_get_rss_indir_size(struct hnae3_handle *handle)
+{
+ return HCLGEVF_RSS_IND_TBL_SIZE;
+}
+
+static int hclgevf_set_rss_indir_table(struct hclgevf_dev *hdev)
+{
+ const u8 *indir = hdev->rss_cfg.rss_indirection_tbl;
+ struct hclgevf_rss_indirection_table_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+ int i, j;
+
+ req = (struct hclgevf_rss_indirection_table_cmd *)desc.data;
+
+ for (i = 0; i < HCLGEVF_RSS_CFG_TBL_NUM; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INDIR_TABLE,
+ false);
+ req->start_table_index = i * HCLGEVF_RSS_CFG_TBL_SIZE;
+ req->rss_set_bitmap = HCLGEVF_RSS_SET_BITMAP_MSK;
+ for (j = 0; j < HCLGEVF_RSS_CFG_TBL_SIZE; j++)
+ req->rss_result[j] =
+ indir[i * HCLGEVF_RSS_CFG_TBL_SIZE + j];
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set RSS indirection table\n",
+ status);
+ return status;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_set_rss_tc_mode(struct hclgevf_dev *hdev, u16 rss_size)
+{
+ struct hclgevf_rss_tc_mode_cmd *req;
+ u16 tc_offset[HCLGEVF_MAX_TC_NUM];
+ u16 tc_valid[HCLGEVF_MAX_TC_NUM];
+ u16 tc_size[HCLGEVF_MAX_TC_NUM];
+ struct hclgevf_desc desc;
+ u16 roundup_size;
+ unsigned int i;
+ int status;
+
+ req = (struct hclgevf_rss_tc_mode_cmd *)desc.data;
+
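+	/* the hardware expects the per-TC RSS size as a power-of-two
+	 * exponent, so round rss_size up and store its log2
+	 */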
+ roundup_size = roundup_pow_of_two(rss_size);
+ roundup_size = ilog2(roundup_size);
+
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ tc_valid[i] = 1;
+ tc_size[i] = roundup_size;
+ tc_offset[i] = (hdev->hw_tc_map & BIT(i)) ? rss_size * i : 0;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_TC_MODE, false);
+ for (i = 0; i < HCLGEVF_MAX_TC_NUM; i++) {
+ hnae3_set_bit(req->rss_tc_mode[i], HCLGEVF_RSS_TC_VALID_B,
+ (tc_valid[i] & 0x1));
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_SIZE_M,
+ HCLGEVF_RSS_TC_SIZE_S, tc_size[i]);
+ hnae3_set_field(req->rss_tc_mode[i], HCLGEVF_RSS_TC_OFFSET_M,
+ HCLGEVF_RSS_TC_OFFSET_S, tc_offset[i]);
+ }
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to set rss tc mode\n", status);
+
+ return status;
+}
+
+/* for revision 0x20, the vf shares the same rss config with the pf */
+static int hclgevf_get_rss_hash_key(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RSS_MBX_RESP_LEN 8
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 resp_msg[HCLGEVF_RSS_MBX_RESP_LEN];
+ struct hclge_vf_to_pf_msg send_msg;
+ u16 msg_num, hash_key_index;
+ u8 index;
+ int ret;
+
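+	/* the PF returns the RSS key HCLGEVF_RSS_MBX_RESP_LEN bytes at a
+	 * time, selected by send_msg.data[0], so loop until the whole key
+	 * has been copied
+	 */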
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_RSS_KEY, 0);
+ msg_num = (HCLGEVF_RSS_KEY_SIZE + HCLGEVF_RSS_MBX_RESP_LEN - 1) /
+ HCLGEVF_RSS_MBX_RESP_LEN;
+ for (index = 0; index < msg_num; index++) {
+ send_msg.data[0] = index;
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, resp_msg,
+ HCLGEVF_RSS_MBX_RESP_LEN);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "VF get rss hash key from PF failed, ret=%d",
+ ret);
+ return ret;
+ }
+
+ hash_key_index = HCLGEVF_RSS_MBX_RESP_LEN * index;
+ if (index == msg_num - 1)
+ memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+ &resp_msg[0],
+ HCLGEVF_RSS_KEY_SIZE - hash_key_index);
+ else
+ memcpy(&rss_cfg->rss_hash_key[hash_key_index],
+ &resp_msg[0], HCLGEVF_RSS_MBX_RESP_LEN);
+ }
+
+ return 0;
+}
+
+static int hclgevf_get_rss(struct hnae3_handle *handle, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int i, ret;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ /* Get hash algorithm */
+ if (hfunc) {
+ switch (rss_cfg->hash_algo) {
+ case HCLGEVF_RSS_HASH_ALGO_TOEPLITZ:
+ *hfunc = ETH_RSS_HASH_TOP;
+ break;
+ case HCLGEVF_RSS_HASH_ALGO_SIMPLE:
+ *hfunc = ETH_RSS_HASH_XOR;
+ break;
+ default:
+ *hfunc = ETH_RSS_HASH_UNKNOWN;
+ break;
+ }
+ }
+
+ /* Get the RSS Key required by the user */
+ if (key)
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+ if (key) {
+ ret = hclgevf_get_rss_hash_key(hdev);
+ if (ret)
+ return ret;
+ memcpy(key, rss_cfg->rss_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+ }
+ }
+
+ if (indir)
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ indir[i] = rss_cfg->rss_indirection_tbl[i];
+
+ return 0;
+}
+
+static int hclgevf_parse_rss_hfunc(struct hclgevf_dev *hdev, const u8 hfunc,
+ u8 *hash_algo)
+{
+ switch (hfunc) {
+ case ETH_RSS_HASH_TOP:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ return 0;
+ case ETH_RSS_HASH_XOR:
+ *hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ return 0;
+ case ETH_RSS_HASH_NO_CHANGE:
+ *hash_algo = hdev->rss_cfg.hash_algo;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int hclgevf_set_rss(struct hnae3_handle *handle, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 hash_algo;
+ int ret, i;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclgevf_parse_rss_hfunc(hdev, hfunc, &hash_algo);
+ if (ret)
+ return ret;
+
+		/* Set the RSS Hash Key if specified by the user */
+ if (key) {
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo, key);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "invalid hfunc type %u\n", hfunc);
+ return ret;
+ }
+
+			/* Update the shadow RSS key with the user specified key */
+ memcpy(rss_cfg->rss_hash_key, key,
+ HCLGEVF_RSS_KEY_SIZE);
+ } else {
+ ret = hclgevf_set_rss_algo_key(hdev, hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+ }
+ rss_cfg->hash_algo = hash_algo;
+ }
+
+ /* update the shadow RSS table with user specified qids */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = indir[i];
+
+ /* update the hardware */
+ return hclgevf_set_rss_indir_table(hdev);
+}
+
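+/* translate the ethtool RXH_* tuple selection into the device's per-flow
+ * hash bit encoding (source/destination IP and L4 port bits)
+ */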
+static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
+{
+ u8 hash_sets = nfc->data & RXH_L4_B_0_1 ? HCLGEVF_S_PORT_BIT : 0;
+
+ if (nfc->data & RXH_L4_B_2_3)
+ hash_sets |= HCLGEVF_D_PORT_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_PORT_BIT;
+
+ if (nfc->data & RXH_IP_SRC)
+ hash_sets |= HCLGEVF_S_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_S_IP_BIT;
+
+ if (nfc->data & RXH_IP_DST)
+ hash_sets |= HCLGEVF_D_IP_BIT;
+ else
+ hash_sets &= ~HCLGEVF_D_IP_BIT;
+
+ if (nfc->flow_type == SCTP_V4_FLOW || nfc->flow_type == SCTP_V6_FLOW)
+ hash_sets |= HCLGEVF_V_TAG_BIT;
+
+ return hash_sets;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ u8 tuple_sets;
+ int ret;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ if (nfc->data &
+ ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ tuple_sets = hclgevf_get_rss_hash_bits(nfc);
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ req->ipv4_tcp_en = tuple_sets;
+ break;
+ case TCP_V6_FLOW:
+ req->ipv6_tcp_en = tuple_sets;
+ break;
+ case UDP_V4_FLOW:
+ req->ipv4_udp_en = tuple_sets;
+ break;
+ case UDP_V6_FLOW:
+ req->ipv6_udp_en = tuple_sets;
+ break;
+ case SCTP_V4_FLOW:
+ req->ipv4_sctp_en = tuple_sets;
+ break;
+ case SCTP_V6_FLOW:
+ if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 &&
+ (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)))
+ return -EINVAL;
+
+ req->ipv6_sctp_en = tuple_sets;
+ break;
+ case IPV4_FLOW:
+ req->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ case IPV6_FLOW:
+ req->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "Set rss tuple fail, status = %d\n", ret);
+ return ret;
+ }
+
+ rss_cfg->rss_tuple_sets.ipv4_tcp_en = req->ipv4_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv4_udp_en = req->ipv4_udp_en;
+ rss_cfg->rss_tuple_sets.ipv4_sctp_en = req->ipv4_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv4_fragment_en = req->ipv4_fragment_en;
+ rss_cfg->rss_tuple_sets.ipv6_tcp_en = req->ipv6_tcp_en;
+ rss_cfg->rss_tuple_sets.ipv6_udp_en = req->ipv6_udp_en;
+ rss_cfg->rss_tuple_sets.ipv6_sctp_en = req->ipv6_sctp_en;
+ rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en;
+ return 0;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+ struct ethtool_rxnfc *nfc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ u8 tuple_sets;
+
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+ return -EOPNOTSUPP;
+
+ nfc->data = 0;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ break;
+ case UDP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ break;
+ case TCP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ break;
+ case UDP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ break;
+ case SCTP_V4_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ break;
+ case SCTP_V6_FLOW:
+ tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!tuple_sets)
+ return 0;
+
+ if (tuple_sets & HCLGEVF_D_PORT_BIT)
+ nfc->data |= RXH_L4_B_2_3;
+ if (tuple_sets & HCLGEVF_S_PORT_BIT)
+ nfc->data |= RXH_L4_B_0_1;
+ if (tuple_sets & HCLGEVF_D_IP_BIT)
+ nfc->data |= RXH_IP_DST;
+ if (tuple_sets & HCLGEVF_S_IP_BIT)
+ nfc->data |= RXH_IP_SRC;
+
+ return 0;
+}
+
+static int hclgevf_set_rss_input_tuple(struct hclgevf_dev *hdev,
+ struct hclgevf_rss_cfg *rss_cfg)
+{
+ struct hclgevf_rss_input_tuple_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+ req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+
+ req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+ req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+ req->ipv4_sctp_en = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+ req->ipv4_fragment_en = rss_cfg->rss_tuple_sets.ipv4_fragment_en;
+ req->ipv6_tcp_en = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+ req->ipv6_udp_en = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+ req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+ req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en;
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Configure rss input fail, status = %d\n", ret);
+ return ret;
+}
+
+static int hclgevf_get_tc_size(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+
+ return rss_cfg->rss_size;
+}
+
+static int hclgevf_bind_ring_to_vector(struct hnae3_handle *handle, bool en,
+ int vector_id,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ struct hnae3_ring_chain_node *node;
+ int status;
+ int i = 0;
+
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = en ? HCLGE_MBX_MAP_RING_TO_VECTOR :
+ HCLGE_MBX_UNMAP_RING_TO_VECTOR;
+ send_msg.vector_id = vector_id;
+
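+	/* the ring chain may not fit into a single mailbox message, so flush
+	 * a message to the PF every HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM
+	 * entries and once more for the final partial batch
+	 */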
+ for (node = ring_chain; node; node = node->next) {
+ send_msg.param[i].ring_type =
+ hnae3_get_bit(node->flag, HNAE3_RING_TYPE_B);
+
+ send_msg.param[i].tqp_index = node->tqp_index;
+ send_msg.param[i].int_gl_index =
+ hnae3_get_field(node->int_gl_idx,
+ HNAE3_RING_GL_IDX_M,
+ HNAE3_RING_GL_IDX_S);
+
+ i++;
+ if (i == HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM || !node->next) {
+ send_msg.ring_num = i;
+
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, false,
+ NULL, 0);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "Map TQP fail, status is %d.\n",
+ status);
+ return status;
+ }
+ i = 0;
+ }
+ }
+
+ return 0;
+}
+
+static int hclgevf_map_ring_to_vector(struct hnae3_handle *handle, int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ return hclgevf_bind_ring_to_vector(handle, true, vector_id, ring_chain);
+}
+
+static int hclgevf_unmap_ring_from_vector(
+ struct hnae3_handle *handle,
+ int vector,
+ struct hnae3_ring_chain_node *ring_chain)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret, vector_id;
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ return 0;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "Get vector index fail. ret =%d\n", vector_id);
+ return vector_id;
+ }
+
+ ret = hclgevf_bind_ring_to_vector(handle, false, vector_id, ring_chain);
+ if (ret)
+ dev_err(&handle->pdev->dev,
+ "Unmap ring from vector fail. vector=%d, ret =%d\n",
+ vector_id,
+ ret);
+
+ return ret;
+}
+
+static int hclgevf_put_vector(struct hnae3_handle *handle, int vector)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int vector_id;
+
+ vector_id = hclgevf_get_vector_index(hdev, vector);
+ if (vector_id < 0) {
+ dev_err(&handle->pdev->dev,
+ "hclgevf_put_vector get vector index fail. ret =%d\n",
+ vector_id);
+ return vector_id;
+ }
+
+ hclgevf_free_vector(hdev, vector_id);
+
+ return 0;
+}
+
+static int hclgevf_cmd_set_promisc_mode(struct hclgevf_dev *hdev,
+ bool en_uc_pmc, bool en_mc_pmc,
+ bool en_bc_pmc)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ memset(&send_msg, 0, sizeof(send_msg));
+ send_msg.code = HCLGE_MBX_SET_PROMISC_MODE;
+ send_msg.en_bc = en_bc_pmc ? 1 : 0;
+ send_msg.en_uc = en_uc_pmc ? 1 : 0;
+ send_msg.en_mc = en_mc_pmc ? 1 : 0;
+
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "Set promisc mode fail, status is %d.\n", ret);
+
+ return ret;
+}
+
+static int hclgevf_set_promisc_mode(struct hnae3_handle *handle, bool en_uc_pmc,
+ bool en_mc_pmc)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ bool en_bc_pmc;
+
+ en_bc_pmc = hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2;
+
+ return hclgevf_cmd_set_promisc_mode(hdev, en_uc_pmc, en_mc_pmc,
+ en_bc_pmc);
+}
+
+static void hclgevf_request_update_promisc_mode(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+}
+
+static void hclgevf_sync_promisc_mode(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *handle = &hdev->nic;
+ bool en_uc_pmc = handle->netdev_flags & HNAE3_UPE;
+ bool en_mc_pmc = handle->netdev_flags & HNAE3_MPE;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state)) {
+ ret = hclgevf_set_promisc_mode(handle, en_uc_pmc, en_mc_pmc);
+ if (!ret)
+ clear_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+ }
+}
+
+static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, unsigned int tqp_id,
+ int stream_id, bool enable)
+{
+ struct hclgevf_cfg_com_tqp_queue_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclgevf_cfg_com_tqp_queue_cmd *)desc.data;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_CFG_COM_TQP_QUEUE,
+ false);
+ req->tqp_id = cpu_to_le16(tqp_id & HCLGEVF_RING_ID_MASK);
+ req->stream_id = cpu_to_le16(stream_id);
+ if (enable)
+ req->enable |= 1U << HCLGEVF_TQP_ENABLE_B;
+
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status)
+ dev_err(&hdev->pdev->dev,
+ "TQP enable fail, status =%d.\n", status);
+
+ return status;
+}
+
+static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_tqp *tqp;
+ int i;
+
+ for (i = 0; i < kinfo->num_tqps; i++) {
+ tqp = container_of(kinfo->tqp[i], struct hclgevf_tqp, q);
+ memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats));
+ }
+}
+
+static int hclgevf_get_host_mac_addr(struct hclgevf_dev *hdev, u8 *p)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 host_mac[ETH_ALEN];
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_GET_MAC_ADDR, 0);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, host_mac,
+ ETH_ALEN);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "fail to get VF MAC from host %d", status);
+ return status;
+ }
+
+ ether_addr_copy(p, host_mac);
+
+ return 0;
+}
+
+static void hclgevf_get_mac_addr(struct hnae3_handle *handle, u8 *p)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 host_mac_addr[ETH_ALEN];
+
+ if (hclgevf_get_host_mac_addr(hdev, host_mac_addr))
+ return;
+
+ hdev->has_pf_mac = !is_zero_ether_addr(host_mac_addr);
+ if (hdev->has_pf_mac)
+ ether_addr_copy(p, host_mac_addr);
+ else
+ ether_addr_copy(p, hdev->hw.mac.mac_addr);
+}
+
+static int hclgevf_set_mac_addr(struct hnae3_handle *handle, void *p,
+ bool is_first)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u8 *old_mac_addr = (u8 *)hdev->hw.mac.mac_addr;
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 *new_mac_addr = (u8 *)p;
+ int status;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_UNICAST, 0);
+ send_msg.subcode = HCLGE_MBX_MAC_VLAN_UC_MODIFY;
+ ether_addr_copy(send_msg.data, new_mac_addr);
+ if (is_first && !hdev->has_pf_mac)
+ eth_zero_addr(&send_msg.data[ETH_ALEN]);
+ else
+ ether_addr_copy(&send_msg.data[ETH_ALEN], old_mac_addr);
+ status = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (!status)
+ ether_addr_copy(hdev->hw.mac.mac_addr, new_mac_addr);
+
+ return status;
+}
+
+static struct hclgevf_mac_addr_node *
+hclgevf_find_mac_node(struct list_head *list, const u8 *mac_addr)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node)
+ if (ether_addr_equal(mac_addr, mac_node->mac_addr))
+ return mac_node;
+
+ return NULL;
+}
+
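+/* merge a new request state into an existing mac node: TO_ADD cancels a
+ * pending TO_DEL, TO_DEL frees a node that was never programmed (or marks it
+ * for deletion), and ACTIVE marks a previously requested address as
+ * programmed in hardware
+ */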
+static void hclgevf_update_mac_node(struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_NODE_STATE state)
+{
+ switch (state) {
+ /* from set_rx_mode or tmp_add_list */
+ case HCLGEVF_MAC_TO_ADD:
+ if (mac_node->state == HCLGEVF_MAC_TO_DEL)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ /* only from set_rx_mode */
+ case HCLGEVF_MAC_TO_DEL:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ }
+ break;
+ /* only from tmp_add_list, the mac_node->state won't be
+ * HCLGEVF_MAC_ACTIVE
+ */
+ case HCLGEVF_MAC_ACTIVE:
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ break;
+ }
+}
+
+static int hclgevf_update_mac_list(struct hnae3_handle *handle,
+ enum HCLGEVF_MAC_NODE_STATE state,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type,
+ const unsigned char *addr)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclgevf_mac_addr_node *mac_node;
+ struct list_head *list;
+
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+	/* if the mac addr is already in the mac list, there is no need to add
+	 * a new one; just check the mac addr state and convert it to a new
+	 * state, remove it, or do nothing.
+	 */
+ mac_node = hclgevf_find_mac_node(list, addr);
+ if (mac_node) {
+ hclgevf_update_mac_node(mac_node, state);
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+ }
+	/* if this address was never added, there is nothing to delete */
+ if (state == HCLGEVF_MAC_TO_DEL) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOENT;
+ }
+
+ mac_node = kzalloc(sizeof(*mac_node), GFP_ATOMIC);
+ if (!mac_node) {
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return -ENOMEM;
+ }
+
+ mac_node->state = state;
+ ether_addr_copy(mac_node->mac_addr, addr);
+ list_add_tail(&mac_node->node, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+ return 0;
+}
+
+static int hclgevf_add_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_UC, addr);
+}
+
+static int hclgevf_rm_uc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_UC, addr);
+}
+
+static int hclgevf_add_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_rm_mc_addr(struct hnae3_handle *handle,
+ const unsigned char *addr)
+{
+ return hclgevf_update_mac_list(handle, HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ADDR_MC, addr);
+}
+
+static int hclgevf_add_del_mac_addr(struct hclgevf_dev *hdev,
+ struct hclgevf_mac_addr_node *mac_node,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ u8 code, subcode;
+
+ if (mac_type == HCLGEVF_MAC_ADDR_UC) {
+ code = HCLGE_MBX_SET_UNICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_UC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_UC_REMOVE;
+ } else {
+ code = HCLGE_MBX_SET_MULTICAST;
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD)
+ subcode = HCLGE_MBX_MAC_VLAN_MC_ADD;
+ else
+ subcode = HCLGE_MBX_MAC_VLAN_MC_REMOVE;
+ }
+
+ hclgevf_build_send_msg(&send_msg, code, subcode);
+ ether_addr_copy(send_msg.data, mac_node->mac_addr);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static void hclgevf_config_mac_list(struct hclgevf_dev *hdev,
+ struct list_head *list,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+ int ret;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ ret = hclgevf_add_del_mac_addr(hdev, mac_node, mac_type);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to configure mac %pM, state = %d, ret = %d\n",
+ mac_node->mac_addr, mac_node->state, ret);
+ return;
+ }
+ if (mac_node->state == HCLGEVF_MAC_TO_ADD) {
+ mac_node->state = HCLGEVF_MAC_ACTIVE;
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_add_list(struct list_head *add_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, add_list, node) {
+		/* if the mac address from tmp_add_list is not in the
+		 * uc/mc_mac_list, it means a TO_DEL request was received
+		 * during the time window of sending the mac config request to
+		 * the PF. If the mac_node state is ACTIVE, change it to TO_DEL
+		 * so it will be removed next time. If it is TO_ADD, the TO_ADD
+		 * request failed, so just remove the mac node.
+		 */
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+ hclgevf_update_mac_node(new_node, mac_node->state);
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else if (mac_node->state == HCLGEVF_MAC_ACTIVE) {
+ mac_node->state = HCLGEVF_MAC_TO_DEL;
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ } else {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+ }
+}
+
+static void hclgevf_sync_from_del_list(struct list_head *del_list,
+ struct list_head *mac_list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+
+ list_for_each_entry_safe(mac_node, tmp, del_list, node) {
+ new_node = hclgevf_find_mac_node(mac_list, mac_node->mac_addr);
+ if (new_node) {
+			/* If the mac addr exists in the mac list, it means a
+			 * new TO_ADD request was received during the time
+			 * window of sending the mac addr config request to
+			 * the PF, so just change the mac state to ACTIVE.
+			 */
+ new_node->state = HCLGEVF_MAC_ACTIVE;
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ } else {
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, mac_list);
+ }
+ }
+}
+
+static void hclgevf_clear_list(struct list_head *list)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp;
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ list_del(&mac_node->node);
+ kfree(mac_node);
+ }
+}
+
+static void hclgevf_sync_mac_list(struct hclgevf_dev *hdev,
+ enum HCLGEVF_MAC_ADDR_TYPE mac_type)
+{
+ struct hclgevf_mac_addr_node *mac_node, *tmp, *new_node;
+ struct list_head tmp_add_list, tmp_del_list;
+ struct list_head *list;
+
+ INIT_LIST_HEAD(&tmp_add_list);
+ INIT_LIST_HEAD(&tmp_del_list);
+
+	/* move the mac addrs to the tmp_add_list and tmp_del_list, so we
+	 * can add/delete them outside the spin lock
+	 */
+ list = (mac_type == HCLGEVF_MAC_ADDR_UC) ?
+ &hdev->mac_table.uc_mac_list : &hdev->mac_table.mc_mac_list;
+
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ list_for_each_entry_safe(mac_node, tmp, list, node) {
+ switch (mac_node->state) {
+ case HCLGEVF_MAC_TO_DEL:
+ list_del(&mac_node->node);
+ list_add_tail(&mac_node->node, &tmp_del_list);
+ break;
+ case HCLGEVF_MAC_TO_ADD:
+ new_node = kzalloc(sizeof(*new_node), GFP_ATOMIC);
+ if (!new_node)
+ goto stop_traverse;
+
+ ether_addr_copy(new_node->mac_addr, mac_node->mac_addr);
+ new_node->state = mac_node->state;
+ list_add_tail(&new_node->node, &tmp_add_list);
+ break;
+ default:
+ break;
+ }
+ }
+
+stop_traverse:
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+
+ /* delete first, in order to get max mac table space for adding */
+ hclgevf_config_mac_list(hdev, &tmp_del_list, mac_type);
+ hclgevf_config_mac_list(hdev, &tmp_add_list, mac_type);
+
+	/* if adding/deleting some mac addresses failed, move them back to
+	 * the mac_list and retry next time.
+	 */
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_sync_from_del_list(&tmp_del_list, list);
+ hclgevf_sync_from_add_list(&tmp_add_list, list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static void hclgevf_sync_mac_table(struct hclgevf_dev *hdev)
+{
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_UC);
+ hclgevf_sync_mac_list(hdev, HCLGEVF_MAC_ADDR_MC);
+}
+
+static void hclgevf_uninit_mac_list(struct hclgevf_dev *hdev)
+{
+ spin_lock_bh(&hdev->mac_table.mac_list_lock);
+
+ hclgevf_clear_list(&hdev->mac_table.uc_mac_list);
+ hclgevf_clear_list(&hdev->mac_table.mc_mac_list);
+
+ spin_unlock_bh(&hdev->mac_table.mac_list_lock);
+}
+
+static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
+ __be16 proto, u16 vlan_id,
+ bool is_kill)
+{
+#define HCLGEVF_VLAN_MBX_IS_KILL_OFFSET 0
+#define HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET 1
+#define HCLGEVF_VLAN_MBX_PROTO_OFFSET 3
+
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ if (vlan_id > HCLGEVF_MAX_VLAN_ID)
+ return -EINVAL;
+
+ if (proto != htons(ETH_P_8021Q))
+ return -EPROTONOSUPPORT;
+
+	/* When the device is resetting or the reset has failed, the firmware
+	 * is unable to handle the mailbox. Just record the vlan id, and
+	 * remove it after the reset has finished.
+	 */
+ if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ return -EBUSY;
+ }
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_FILTER);
+ send_msg.data[HCLGEVF_VLAN_MBX_IS_KILL_OFFSET] = is_kill;
+ memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_VLAN_ID_OFFSET], &vlan_id,
+ sizeof(vlan_id));
+ memcpy(&send_msg.data[HCLGEVF_VLAN_MBX_PROTO_OFFSET], &proto,
+ sizeof(proto));
+	/* when removing the hw vlan filter fails, record the vlan id and try
+	 * to remove it from hw later, to stay consistent with the stack.
+	 */
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (is_kill && ret)
+ set_bit(vlan_id, hdev->vlan_del_fail_bmap);
+
+ return ret;
+}
+
+static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_SYNC_COUNT 60
+ struct hnae3_handle *handle = &hdev->nic;
+ int ret, sync_cnt = 0;
+ u16 vlan_id;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ while (vlan_id != VLAN_N_VID) {
+ ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
+ vlan_id, true);
+ if (ret)
+ return;
+
+ clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
+ sync_cnt++;
+ if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
+ return;
+
+ vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
+ }
+}
+
+static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_VLAN_RX_OFF_CFG);
+ send_msg.data[0] = enable ? 1 : 0;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ /* disable the vf queue before sending the queue reset msg to the PF */
+ ret = hclgevf_tqp_enable(hdev, queue_id, 0, false);
+ if (ret)
+ return ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_QUEUE_RESET, 0);
+ memcpy(send_msg.data, &queue_id, sizeof(queue_id));
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
+static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_MTU, 0);
+ memcpy(send_msg.data, &new_mtu, sizeof(new_mtu));
+ return hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+}
+
+static int hclgevf_notify_client(struct hclgevf_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->nic_client;
+ struct hnae3_handle *handle = &hdev->nic;
+ int ret;
+
+ if (!test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state) ||
+ !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n",
+ type, ret);
+
+ return ret;
+}
+
+static int hclgevf_notify_roce_client(struct hclgevf_dev *hdev,
+ enum hnae3_reset_notify_type type)
+{
+ struct hnae3_client *client = hdev->roce_client;
+ struct hnae3_handle *handle = &hdev->roce;
+ int ret;
+
+ if (!test_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state) || !client)
+ return 0;
+
+ if (!client->ops->reset_notify)
+ return -EOPNOTSUPP;
+
+ ret = client->ops->reset_notify(handle, type);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "notify roce client failed %d(%d)",
+ type, ret);
+ return ret;
+}
+
+static int hclgevf_reset_wait(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RESET_WAIT_US 20000
+#define HCLGEVF_RESET_WAIT_CNT 2000
+#define HCLGEVF_RESET_WAIT_TIMEOUT_US \
+ (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT)
+
+ u32 val;
+ int ret;
+
+ if (hdev->reset_type == HNAE3_VF_RESET)
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_VF_RST_ING, val,
+ !(val & HCLGEVF_VF_RST_ING_BIT),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+ else
+ ret = readl_poll_timeout(hdev->hw.io_base +
+ HCLGEVF_RST_ING, val,
+ !(val & HCLGEVF_RST_ING_BITS),
+ HCLGEVF_RESET_WAIT_US,
+ HCLGEVF_RESET_WAIT_TIMEOUT_US);
+
+ /* hardware completion status should be available by this time */
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "couldn't get reset done status from h/w, timeout!\n");
+ return ret;
+ }
+
+ /* wait a bit longer to let the reset of the stack complete. This may
+ * be needed when the reset was asserted by the PF. It also means we
+ * may end up waiting a bit longer even for a VF reset.
+ */
+ if (hdev->reset_type == HNAE3_VF_FULL_RESET)
+ msleep(5000);
+ else
+ msleep(500);
+
+ return 0;
+}
+
+static void hclgevf_reset_handshake(struct hclgevf_dev *hdev, bool enable)
+{
+ u32 reg_val;
+
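+ /* HCLGEVF_NIC_SW_RST_RDY in the CSQ depth register is used as the
+ * reset handshake flag between the driver and the IMP firmware
+ */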
+ reg_val = hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
+ if (enable)
+ reg_val |= HCLGEVF_NIC_SW_RST_RDY;
+ else
+ reg_val &= ~HCLGEVF_NIC_SW_RST_RDY;
+
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG,
+ reg_val);
+}
+
+static int hclgevf_reset_stack(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* uninitialize the nic client */
+ ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ /* re-initialize the hclge device */
+ ret = hclgevf_reset_hdev(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "hclge device re-init failed, VF is disabled!\n");
+ return ret;
+ }
+
+ /* bring up the nic client again */
+ ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT);
+ if (ret)
+ return ret;
+
+ /* clear handshake status with IMP */
+ hclgevf_reset_handshake(hdev, false);
+
+ /* bring up the nic to enable TX/RX again */
+ return hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+}
+
+static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_RESET_SYNC_TIME 100
+
+ if (hdev->reset_type == HNAE3_VF_FUNC_RESET) {
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_RESET, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, true, NULL, 0);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to assert VF reset, ret = %d\n", ret);
+ return ret;
+ }
+ hdev->rst_stats.vf_func_rst_cnt++;
+ }
+
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ /* inform hardware that preparatory work is done */
+ msleep(HCLGEVF_RESET_SYNC_TIME);
+ hclgevf_reset_handshake(hdev, true);
+ dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done\n",
+ hdev->reset_type);
+
+ return 0;
+}
+
+static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev)
+{
+ dev_info(&hdev->pdev->dev, "VF function reset count: %u\n",
+ hdev->rst_stats.vf_func_rst_cnt);
+ dev_info(&hdev->pdev->dev, "FLR reset count: %u\n",
+ hdev->rst_stats.flr_rst_cnt);
+ dev_info(&hdev->pdev->dev, "VF reset count: %u\n",
+ hdev->rst_stats.vf_rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset done count: %u\n",
+ hdev->rst_stats.rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "HW reset done count: %u\n",
+ hdev->rst_stats.hw_rst_done_cnt);
+ dev_info(&hdev->pdev->dev, "reset count: %u\n",
+ hdev->rst_stats.rst_cnt);
+ dev_info(&hdev->pdev->dev, "reset fail count: %u\n",
+ hdev->rst_stats.rst_fail_cnt);
+ dev_info(&hdev->pdev->dev, "vector0 interrupt enable status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_MISC_VECTOR_REG_BASE));
+ dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG));
+ dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG));
+ dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n",
+ hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING));
+ dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state);
+}
+
+static void hclgevf_reset_err_handle(struct hclgevf_dev *hdev)
+{
+ /* recover handshake status with IMP when reset fail */
+ hclgevf_reset_handshake(hdev, true);
+ hdev->rst_stats.rst_fail_cnt++;
+ dev_err(&hdev->pdev->dev, "failed to reset VF(%u)\n",
+ hdev->rst_stats.rst_fail_cnt);
+
+ if (hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT)
+ set_bit(hdev->reset_type, &hdev->reset_pending);
+
+ if (hclgevf_is_reset_pending(hdev)) {
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+ } else {
+ set_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+ hclgevf_dump_rst_info(hdev);
+ }
+}
+
+static int hclgevf_reset_prepare(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.rst_cnt++;
+
+ /* perform reset of the stack & ae device for a client */
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ /* bring down the nic to stop any ongoing TX/RX */
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ rtnl_unlock();
+ if (ret)
+ return ret;
+
+ return hclgevf_reset_prepare_wait(hdev);
+}
+
+static int hclgevf_reset_rebuild(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hdev->rst_stats.hw_rst_done_cnt++;
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ /* now, re-initialize the nic client and ae device */
+ ret = hclgevf_reset_stack(hdev);
+ rtnl_unlock();
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "failed to reset VF stack\n");
+ return ret;
+ }
+
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_INIT_CLIENT);
+ /* ignore the RoCE notify error if the reset has already failed
+ * HCLGEVF_RESET_MAX_FAIL_CNT - 1 times
+ */
+ if (ret &&
+ hdev->rst_stats.rst_fail_cnt < HCLGEVF_RESET_MAX_FAIL_CNT - 1)
+ return ret;
+
+ ret = hclgevf_notify_roce_client(hdev, HNAE3_UP_CLIENT);
+ if (ret)
+ return ret;
+
+ hdev->last_reset_time = jiffies;
+ hdev->rst_stats.rst_done_cnt++;
+ hdev->rst_stats.rst_fail_cnt = 0;
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ return 0;
+}
+
+static void hclgevf_reset(struct hclgevf_dev *hdev)
+{
+ if (hclgevf_reset_prepare(hdev))
+ goto err_reset;
+
+ /* check whether the VF could successfully fetch the reset completion
+ * status from the hardware
+ */
+ if (hclgevf_reset_wait(hdev)) {
+ /* can't do much in this situation, will disable VF */
+ dev_err(&hdev->pdev->dev,
+ "failed to fetch H/W reset completion status\n");
+ goto err_reset;
+ }
+
+ if (hclgevf_reset_rebuild(hdev))
+ goto err_reset;
+
+ return;
+
+err_reset:
+ hclgevf_reset_err_handle(hdev);
+}
+
+static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev,
+ unsigned long *addr)
+{
+ enum hnae3_reset_type rst_level = HNAE3_NONE_RESET;
+
+ /* return the highest priority reset level amongst all */
+ if (test_bit(HNAE3_VF_RESET, addr)) {
+ rst_level = HNAE3_VF_RESET;
+ clear_bit(HNAE3_VF_RESET, addr);
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) {
+ rst_level = HNAE3_VF_FULL_RESET;
+ clear_bit(HNAE3_VF_FULL_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_PF_FUNC_RESET;
+ clear_bit(HNAE3_VF_PF_FUNC_RESET, addr);
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) {
+ rst_level = HNAE3_VF_FUNC_RESET;
+ clear_bit(HNAE3_VF_FUNC_RESET, addr);
+ } else if (test_bit(HNAE3_FLR_RESET, addr)) {
+ rst_level = HNAE3_FLR_RESET;
+ clear_bit(HNAE3_FLR_RESET, addr);
+ }
+
+ return rst_level;
+}
+
+static void hclgevf_reset_event(struct pci_dev *pdev,
+ struct hnae3_handle *handle)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ dev_info(&hdev->pdev->dev, "received reset request from VF enet\n");
+
+ if (hdev->default_reset_request)
+ hdev->reset_level =
+ hclgevf_get_reset_level(hdev,
+ &hdev->default_reset_request);
+ else
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
+
+ /* reset of this VF requested */
+ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+
+ hdev->last_reset_time = jiffies;
+}
+
+static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev,
+ enum hnae3_reset_type rst_type)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ set_bit(rst_type, &hdev->default_reset_request);
+}
+
+static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en)
+{
+ writel(en ? 1 : 0, vector->addr);
+}
+
+static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev)
+{
+#define HCLGEVF_FLR_RETRY_WAIT_MS 500
+#define HCLGEVF_FLR_RETRY_CNT 5
+
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int retry_cnt = 0;
+ int ret;
+
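+ /* retry the FLR preparation if it fails while a reset is still
+ * pending or until the retry count is exhausted
+ */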
+retry:
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ hdev->reset_type = HNAE3_FLR_RESET;
+ ret = hclgevf_reset_prepare(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "fail to prepare FLR, ret=%d\n",
+ ret);
+ if (hdev->reset_pending ||
+ retry_cnt++ < HCLGEVF_FLR_RETRY_CNT) {
+ dev_err(&hdev->pdev->dev,
+ "reset_pending:0x%lx, retry_cnt:%d\n",
+ hdev->reset_pending, retry_cnt);
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+ msleep(HCLGEVF_FLR_RETRY_WAIT_MS);
+ goto retry;
+ }
+ }
+
+ /* disable misc vector before FLR done */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ hdev->rst_stats.flr_rst_cnt++;
+}
+
+static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ ret = hclgevf_reset_rebuild(hdev);
+ if (ret)
+ dev_warn(&hdev->pdev->dev, "fail to rebuild, ret=%d\n",
+ ret);
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static u32 hclgevf_get_fw_version(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->fw_version;
+}
+
+static void hclgevf_get_misc_vector(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_misc_vector *vector = &hdev->misc_vector;
+
+ vector->vector_irq = pci_irq_vector(hdev->pdev,
+ HCLGEVF_MISC_VECTOR_NUM);
+ vector->addr = hdev->hw.io_base + HCLGEVF_MISC_VECTOR_REG_BASE;
+ /* vector status always valid for Vector 0 */
+ hdev->vector_status[HCLGEVF_MISC_VECTOR_NUM] = 0;
+ hdev->vector_irq[HCLGEVF_MISC_VECTOR_NUM] = vector->vector_irq;
+
+ hdev->num_msi_left -= 1;
+ hdev->num_msi_used += 1;
+}
+
+void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_RST_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
+}
+
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev)
+{
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_and_set_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, 0);
+}
+
+static void hclgevf_task_schedule(struct hclgevf_dev *hdev,
+ unsigned long delay)
+{
+ if (!test_bit(HCLGEVF_STATE_REMOVING, &hdev->state) &&
+ !test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ mod_delayed_work(hclgevf_wq, &hdev->service_task, delay);
+}
+
+static void hclgevf_reset_service_task(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_RESET_ATTEMPTS_CNT 3
+
+ if (!test_and_clear_bit(HCLGEVF_STATE_RST_SERVICE_SCHED, &hdev->state))
+ return;
+
+ down(&hdev->reset_sem);
+ set_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+
+ if (test_and_clear_bit(HCLGEVF_RESET_PENDING,
+ &hdev->reset_state)) {
+ /* The PF has intimated that it is about to reset the hardware.
+ * We now have to poll & check if the hardware has actually
+ * completed the reset sequence. On hardware reset completion,
+ * the VF needs to reset the client and ae device.
+ */
+ hdev->reset_attempts = 0;
+
+ hdev->last_reset_time = jiffies;
+ hdev->reset_type =
+ hclgevf_get_reset_level(hdev, &hdev->reset_pending);
+ if (hdev->reset_type != HNAE3_NONE_RESET)
+ hclgevf_reset(hdev);
+ } else if (test_and_clear_bit(HCLGEVF_RESET_REQUESTED,
+ &hdev->reset_state)) {
+ /* we could be here when either of the below happens:
+ * 1. the reset was initiated due to a watchdog timeout caused by
+ * a. the IMP was reset earlier and our TX got choked, which made
+ * the watchdog react and induce a VF reset. This also means
+ * our cmdq would be unreliable.
+ * b. a problem in TX due to another lower layer (e.g. the link
+ * layer not functioning properly)
+ * 2. a VF reset might have been initiated due to some config
+ * change.
+ *
+ * NOTE: there is no clearer way to detect the above cases than to
+ * react to the PF's response to this reset request. The PF will ack
+ * cases 1b and 2, but we will get no intimation about 1a from the
+ * PF, since the cmdq would be in an unreliable state, i.e. mailbox
+ * communication between PF and VF would be broken.
+ *
+ * if we never get into the pending state it means either:
+ * 1. the PF is not receiving our request, which could be due to an
+ * IMP reset
+ * 2. the PF is in a bad state
+ * We cannot do much for case 2, but to rule out case 1 we can first
+ * try resetting our PCIe + stack and see if it alleviates the
+ * problem.
+ */
+ if (hdev->reset_attempts > HCLGEVF_MAX_RESET_ATTEMPTS_CNT) {
+ /* prepare for full reset of stack + pcie interface */
+ set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending);
+
+ /* "defer" schedule the reset task again */
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ } else {
+ hdev->reset_attempts++;
+
+ set_bit(hdev->reset_level, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ }
+ hclgevf_reset_task_schedule(hdev);
+ }
+
+ hdev->reset_type = HNAE3_NONE_RESET;
+ clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+ up(&hdev->reset_sem);
+}
+
+static void hclgevf_mailbox_service_task(struct hclgevf_dev *hdev)
+{
+ if (!test_and_clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state))
+ return;
+
+ if (test_and_set_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state))
+ return;
+
+ hclgevf_mbx_async_handler(hdev);
+
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+}
+
+static void hclgevf_keep_alive(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_KEEP_ALIVE, 0);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF sends keep alive cmd failed(=%d)\n", ret);
+}
+
+static void hclgevf_periodic_service_task(struct hclgevf_dev *hdev)
+{
+ unsigned long delta = round_jiffies_relative(HZ);
+ struct hnae3_handle *handle = &hdev->nic;
+
+ if (test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state))
+ return;
+
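+ /* if less than a second has passed since the last run, skip this
+ * round and just reschedule for the remaining time
+ */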
+ if (time_is_after_jiffies(hdev->last_serv_processed + HZ)) {
+ delta = jiffies - hdev->last_serv_processed;
+
+ if (delta < round_jiffies_relative(HZ)) {
+ delta = round_jiffies_relative(HZ) - delta;
+ goto out;
+ }
+ }
+
+ hdev->serv_processed_cnt++;
+ if (!(hdev->serv_processed_cnt % HCLGEVF_KEEP_ALIVE_TASK_INTERVAL))
+ hclgevf_keep_alive(hdev);
+
+ if (test_bit(HCLGEVF_STATE_DOWN, &hdev->state)) {
+ hdev->last_serv_processed = jiffies;
+ goto out;
+ }
+
+ if (!(hdev->serv_processed_cnt % HCLGEVF_STATS_TIMER_INTERVAL))
+ hclgevf_tqps_update_stats(handle);
+
+ /* request the link status from the PF. The PF may be able to push
+ * such updates to the VF in the future, so this might be removed
+ * later
+ */
+ hclgevf_request_link_info(hdev);
+
+ hclgevf_update_link_mode(hdev);
+
+ hclgevf_sync_vlan_filter(hdev);
+
+ hclgevf_sync_mac_table(hdev);
+
+ hclgevf_sync_promisc_mode(hdev);
+
+ hdev->last_serv_processed = jiffies;
+
+out:
+ hclgevf_task_schedule(hdev, delta);
+}
+
+static void hclgevf_service_task(struct work_struct *work)
+{
+ struct hclgevf_dev *hdev = container_of(work, struct hclgevf_dev,
+ service_task.work);
+
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+ hclgevf_periodic_service_task(hdev);
+
+ /* Handle reset and mbx again in case periodical task delays the
+ * handling by calling hclgevf_task_schedule() in
+ * hclgevf_periodic_service_task()
+ */
+ hclgevf_reset_service_task(hdev);
+ hclgevf_mailbox_service_task(hdev);
+}
+
+static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr)
+{
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr);
+}
+
+static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
+ u32 *clearval)
+{
+ u32 val, cmdq_stat_reg, rst_ing_reg;
+
+ /* fetch the events from their corresponding regs */
+ cmdq_stat_reg = hclgevf_read_dev(&hdev->hw,
+ HCLGEVF_VECTOR0_CMDQ_STATE_REG);
+
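+ /* check for vector0 reset event source */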
+ if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_stat_reg) {
+ rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+ dev_info(&hdev->pdev->dev,
+ "receive reset interrupt 0x%x!\n", rst_ing_reg);
+ set_bit(HNAE3_VF_RESET, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RST_INT_B);
+ hdev->rst_stats.vf_rst_cnt++;
+ /* set the VF hardware reset status; the PF will clear this
+ * status once it has finished initializing.
+ */
+ val = hclgevf_read_dev(&hdev->hw, HCLGEVF_VF_RST_ING);
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_VF_RST_ING,
+ val | HCLGEVF_VF_RST_ING_BIT);
+ return HCLGEVF_VECTOR0_EVENT_RST;
+ }
+
+ /* check for vector0 mailbox(=CMDQ RX) event source */
+ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_stat_reg) {
+ /* for revision 0x21, an interrupt is cleared by writing 0 to its
+ * bit in the clear register, while writing 1 keeps the old value.
+ * for revision 0x20, the clear register is a read & write
+ * register, so we should just write 0 to the bit we are
+ * handling, and keep the other bits as in cmdq_stat_reg.
+ */
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
+ *clearval = ~(1U << HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+ else
+ *clearval = cmdq_stat_reg &
+ ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B);
+
+ return HCLGEVF_VECTOR0_EVENT_MBX;
+ }
+
+ /* print other vector0 event source */
+ dev_info(&hdev->pdev->dev,
+ "vector 0 interrupt from unknown source, cmdq_src = %#x\n",
+ cmdq_stat_reg);
+
+ return HCLGEVF_VECTOR0_EVENT_OTHER;
+}
+
+static void hclgevf_reset_timer(struct timer_list *t)
+{
+ struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
+
+ hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
+ hclgevf_reset_task_schedule(hdev);
+}
+
+static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
+{
+#define HCLGEVF_RESET_DELAY 5
+
+ enum hclgevf_evt_cause event_cause;
+ struct hclgevf_dev *hdev = data;
+ u32 clearval;
+
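+ /* mask the misc vector while the event cause is checked and handled */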
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ event_cause = hclgevf_check_evt_cause(hdev, &clearval);
+ if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER)
+ hclgevf_clear_event_cause(hdev, clearval);
+
+ switch (event_cause) {
+ case HCLGEVF_VECTOR0_EVENT_RST:
+ mod_timer(&hdev->reset_timer,
+ jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
+ break;
+ case HCLGEVF_VECTOR0_EVENT_MBX:
+ hclgevf_mbx_handler(hdev);
+ break;
+ default:
+ break;
+ }
+
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return IRQ_HANDLED;
+}
+
+static int hclgevf_configure(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ /* get current port based vlan state from PF */
+ ret = hclgevf_get_port_base_vlan_filter_state(hdev);
+ if (ret)
+ return ret;
+
+ /* get queue configuration from PF */
+ ret = hclgevf_get_queue_info(hdev);
+ if (ret)
+ return ret;
+
+ /* get queue depth info from PF */
+ ret = hclgevf_get_queue_depth(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_get_pf_media_type(hdev);
+ if (ret)
+ return ret;
+
+ /* get tc configuration from PF */
+ return hclgevf_get_tc_info(hdev);
+}
+
+static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ struct hclgevf_dev *hdev;
+
+ hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL);
+ if (!hdev)
+ return -ENOMEM;
+
+ hdev->pdev = pdev;
+ hdev->ae_dev = ae_dev;
+ ae_dev->priv = hdev;
+
+ return 0;
+}
+
+static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *roce = &hdev->roce;
+ struct hnae3_handle *nic = &hdev->nic;
+
+ roce->rinfo.num_vectors = hdev->num_roce_msix;
+
+ if (hdev->num_msi_left < roce->rinfo.num_vectors ||
+ hdev->num_msi_left == 0)
+ return -EINVAL;
+
+ roce->rinfo.base_vector = hdev->roce_base_vector;
+
+ roce->rinfo.netdev = nic->kinfo.netdev;
+ roce->rinfo.roce_io_base = hdev->hw.io_base;
+
+ roce->pdev = nic->pdev;
+ roce->ae_algo = nic->ae_algo;
+ roce->numa_node_mask = nic->numa_node_mask;
+
+ return 0;
+}
+
+static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
+{
+ struct hclgevf_cfg_gro_status_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ if (!hnae3_dev_gro_supported(hdev))
+ return 0;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
+ false);
+ req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
+
+ req->gro_en = en ? 1 : 0;
+
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret)
+ dev_err(&hdev->pdev->dev,
+ "VF GRO hardware config cmd failed, ret = %d.\n", ret);
+
+ return ret;
+}
+
+static void hclgevf_rss_init_cfg(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ struct hclgevf_rss_tuple_cfg *tuple_sets;
+ u32 i;
+
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_TOEPLITZ;
+ rss_cfg->rss_size = hdev->nic.kinfo.rss_size;
+ tuple_sets = &rss_cfg->rss_tuple_sets;
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ rss_cfg->hash_algo = HCLGEVF_RSS_HASH_ALGO_SIMPLE;
+ memcpy(rss_cfg->rss_hash_key, hclgevf_hash_key,
+ HCLGEVF_RSS_KEY_SIZE);
+
+ tuple_sets->ipv4_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv4_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv4_sctp_en = HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv4_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_tcp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_udp_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ tuple_sets->ipv6_sctp_en =
+ hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 ?
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT :
+ HCLGEVF_RSS_INPUT_TUPLE_SCTP;
+ tuple_sets->ipv6_fragment_en = HCLGEVF_RSS_INPUT_TUPLE_OTHER;
+ }
+
+ /* Initialize RSS indirect table */
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size;
+}
+
+static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+ int ret;
+
+ if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
+ ret = hclgevf_set_rss_algo_key(hdev, rss_cfg->hash_algo,
+ rss_cfg->rss_hash_key);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_set_rss_input_tuple(hdev, rss_cfg);
+ if (ret)
+ return ret;
+ }
+
+ ret = hclgevf_set_rss_indir_table(hdev);
+ if (ret)
+ return ret;
+
+ return hclgevf_set_rss_tc_mode(hdev, rss_cfg->rss_size);
+}
+
+static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ int ret;
+
+ ret = hclgevf_en_hw_strip_rxvtag(nic, true);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed to enable rx vlan offload, ret = %d\n", ret);
+ return ret;
+ }
+
+ return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0,
+ false);
+}
+
+static void hclgevf_flush_link_update(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_FLUSH_LINK_TIMEOUT 100000
+
+ unsigned long last = hdev->serv_processed_cnt;
+ int i = 0;
+
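+ /* poll until the link-updating flag clears, the timeout expires or
+ * the service task has run again
+ */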
+ while (test_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state) &&
+ i++ < HCLGEVF_FLUSH_LINK_TIMEOUT &&
+ last == hdev->serv_processed_cnt)
+ usleep_range(1, 1);
+}
+
+static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (enable) {
+ hclgevf_task_schedule(hdev, 0);
+ } else {
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ /* flush memory to make sure DOWN is seen by service task */
+ smp_mb__before_atomic();
+ hclgevf_flush_link_update(hdev);
+ }
+}
+
+static int hclgevf_ae_start(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ clear_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ hclgevf_reset_tqp_stats(handle);
+
+ hclgevf_request_link_info(hdev);
+
+ hclgevf_update_link_mode(hdev);
+
+ return 0;
+}
+
+static void hclgevf_ae_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i;
+
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+
+ if (hdev->reset_type != HNAE3_VF_RESET)
+ for (i = 0; i < handle->kinfo.num_tqps; i++)
+ if (hclgevf_reset_tqp(handle, i))
+ break;
+
+ hclgevf_reset_tqp_stats(handle);
+ hclgevf_update_link_status(hdev, 0);
+}
+
+static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive)
+{
+#define HCLGEVF_STATE_ALIVE 1
+#define HCLGEVF_STATE_NOT_ALIVE 0
+
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_ALIVE, 0);
+ send_msg.data[0] = alive ? HCLGEVF_STATE_ALIVE :
+ HCLGEVF_STATE_NOT_ALIVE;
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_client_start(struct hnae3_handle *handle)
+{
+ return hclgevf_set_alive(handle, true);
+}
+
+static void hclgevf_client_stop(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int ret;
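+ /* retry removing the vlan ids whose deletion failed earlier, at most
+ * HCLGEVF_MAX_SYNC_COUNT entries per pass
+ */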
+
+ ret = hclgevf_set_alive(handle, false);
+ if (ret)
+ dev_warn(&hdev->pdev->dev,
+ "%s failed %d\n", __func__, ret);
+}
+
+static void hclgevf_state_init(struct hclgevf_dev *hdev)
+{
+ clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state);
+ clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state);
+ clear_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state);
+
+ INIT_DELAYED_WORK(&hdev->service_task, hclgevf_service_task);
+
+ mutex_init(&hdev->mbx_resp.mbx_mutex);
+ sema_init(&hdev->reset_sem, 1);
+
+ spin_lock_init(&hdev->mac_table.mac_list_lock);
+ INIT_LIST_HEAD(&hdev->mac_table.uc_mac_list);
+ INIT_LIST_HEAD(&hdev->mac_table.mc_mac_list);
+
+ /* bring the device down */
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+}
+
+static void hclgevf_state_uninit(struct hclgevf_dev *hdev)
+{
+ set_bit(HCLGEVF_STATE_DOWN, &hdev->state);
+ set_bit(HCLGEVF_STATE_REMOVING, &hdev->state);
+
+ if (hdev->service_task.work.func)
+ cancel_delayed_work_sync(&hdev->service_task);
+
+ mutex_destroy(&hdev->mbx_resp.mbx_mutex);
+}
+
+static int hclgevf_init_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int vectors;
+ int i;
+
+ if (hnae3_dev_roce_supported(hdev))
+ vectors = pci_alloc_irq_vectors(pdev,
+ hdev->roce_base_msix_offset + 1,
+ hdev->num_msi,
+ PCI_IRQ_MSIX);
+ else
+ vectors = pci_alloc_irq_vectors(pdev, HNAE3_MIN_VECTOR_NUM,
+ hdev->num_msi,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+
+ if (vectors < 0) {
+ dev_err(&pdev->dev,
+ "failed(%d) to allocate MSI/MSI-X vectors\n",
+ vectors);
+ return vectors;
+ }
+ if (vectors < hdev->num_msi)
+ dev_warn(&hdev->pdev->dev,
+ "requested %u MSI/MSI-X, but allocated %d MSI/MSI-X\n",
+ hdev->num_msi, vectors);
+
+ hdev->num_msi = vectors;
+ hdev->num_msi_left = vectors;
+
+ hdev->base_msi_vector = pdev->irq;
+ hdev->roce_base_vector = pdev->irq + hdev->roce_base_msix_offset;
+
+ hdev->vector_status = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(u16), GFP_KERNEL);
+ if (!hdev->vector_status) {
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < hdev->num_msi; i++)
+ hdev->vector_status[i] = HCLGEVF_INVALID_VPORT;
+
+ hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi,
+ sizeof(int), GFP_KERNEL);
+ if (!hdev->vector_irq) {
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ pci_free_irq_vectors(pdev);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_msi(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ devm_kfree(&pdev->dev, hdev->vector_status);
+ devm_kfree(&pdev->dev, hdev->vector_irq);
+ pci_free_irq_vectors(pdev);
+}
+
+static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev)
+{
+ int ret;
+
+ hclgevf_get_misc_vector(hdev);
+
+ snprintf(hdev->misc_vector.name, HNAE3_INT_NAME_LEN, "%s-misc-%s",
+ HCLGEVF_NAME, pci_name(hdev->pdev));
+ ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle,
+ 0, hdev->misc_vector.name, hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev, "VF failed to request misc irq(%d)\n",
+ hdev->misc_vector.vector_irq);
+ return ret;
+ }
+
+ hclgevf_clear_event_cause(hdev, 0);
+
+ /* enable misc. vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, true);
+
+ return ret;
+}
+
+static void hclgevf_misc_irq_uninit(struct hclgevf_dev *hdev)
+{
+ /* disable misc vector(vector 0) */
+ hclgevf_enable_vector(&hdev->misc_vector, false);
+ synchronize_irq(hdev->misc_vector.vector_irq);
+ free_irq(hdev->misc_vector.vector_irq, hdev);
+ hclgevf_free_vector(hdev, 0);
+}
+
+static void hclgevf_info_show(struct hclgevf_dev *hdev)
+{
+ struct device *dev = &hdev->pdev->dev;
+
+ dev_info(dev, "VF info begin:\n");
+
+ dev_info(dev, "Task queue pairs numbers: %u\n", hdev->num_tqps);
+ dev_info(dev, "Desc num per TX queue: %u\n", hdev->num_tx_desc);
+ dev_info(dev, "Desc num per RX queue: %u\n", hdev->num_rx_desc);
+ dev_info(dev, "Numbers of vports: %u\n", hdev->num_alloc_vport);
+ dev_info(dev, "HW tc map: 0x%x\n", hdev->hw_tc_map);
+ dev_info(dev, "PF media type of this VF: %u\n",
+ hdev->hw.mac.media_type);
+
+ dev_info(dev, "VF info end.\n");
+}
+
+static int hclgevf_init_nic_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int rst_cnt = hdev->rst_stats.rst_cnt;
+ int ret;
+
+ ret = client->ops->init_instance(&hdev->nic);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ rst_cnt != hdev->rst_stats.rst_cnt) {
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
+ client->ops->uninit_instance(&hdev->nic, 0);
+ return -EBUSY;
+ }
+
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ if (netif_msg_drv(&hdev->nic))
+ hclgevf_info_show(hdev);
+
+ return 0;
+}
+
+static int hclgevf_init_roce_client_instance(struct hnae3_ae_dev *ae_dev,
+ struct hnae3_client *client)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ if (!hnae3_dev_roce_supported(hdev) || !hdev->roce_client ||
+ !hdev->nic_client)
+ return 0;
+
+ ret = hclgevf_init_roce_base_info(hdev);
+ if (ret)
+ return ret;
+
+ ret = client->ops->init_instance(&hdev->roce);
+ if (ret)
+ return ret;
+
+ set_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
+ hnae3_set_client_init_flag(client, ae_dev, 1);
+
+ return 0;
+}
+
+static int hclgevf_init_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+ int ret;
+
+ switch (client->type) {
+ case HNAE3_CLIENT_KNIC:
+ hdev->nic_client = client;
+ hdev->nic.client = client;
+
+ ret = hclgevf_init_nic_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_nic;
+
+ ret = hclgevf_init_roce_client_instance(ae_dev,
+ hdev->roce_client);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ case HNAE3_CLIENT_ROCE:
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_client = client;
+ hdev->roce.client = client;
+ }
+
+ ret = hclgevf_init_roce_client_instance(ae_dev, client);
+ if (ret)
+ goto clear_roce;
+
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+
+clear_nic:
+ hdev->nic_client = NULL;
+ hdev->nic.client = NULL;
+ return ret;
+clear_roce:
+ hdev->roce_client = NULL;
+ hdev->roce.client = NULL;
+ return ret;
+}
+
+static void hclgevf_uninit_client_instance(struct hnae3_client *client,
+ struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ /* un-init roce, if it exists */
+ if (hdev->roce_client) {
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGEVF_WAIT_RESET_DONE);
+ clear_bit(HCLGEVF_STATE_ROCE_REGISTERED, &hdev->state);
+
+ hdev->roce_client->ops->uninit_instance(&hdev->roce, 0);
+ hdev->roce_client = NULL;
+ hdev->roce.client = NULL;
+ }
+
+ /* un-init nic/unic, if this was not called by roce client */
+ if (client->ops->uninit_instance && hdev->nic_client &&
+ client->type != HNAE3_CLIENT_ROCE) {
+ while (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state))
+ msleep(HCLGEVF_WAIT_RESET_DONE);
+ clear_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state);
+
+ client->ops->uninit_instance(&hdev->nic, 0);
+ hdev->nic_client = NULL;
+ hdev->nic.client = NULL;
+ }
+}
+
+static int hclgevf_pci_init(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ struct hclgevf_hw *hw;
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to enable PCI device\n");
+ return ret;
+ }
+
+ ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ if (ret) {
+ dev_err(&pdev->dev, "can't set consistent PCI DMA, exiting");
+ goto err_disable_device;
+ }
+
+ ret = pci_request_regions(pdev, HCLGEVF_DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "PCI request regions failed %d\n", ret);
+ goto err_disable_device;
+ }
+
+ pci_set_master(pdev);
+ hw = &hdev->hw;
+ hw->hdev = hdev;
+ hw->io_base = pci_iomap(pdev, 2, 0);
+ if (!hw->io_base) {
+ dev_err(&pdev->dev, "can't map configuration register space\n");
+ ret = -ENOMEM;
+ goto err_clr_master;
+ }
+
+ return 0;
+
+err_clr_master:
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+
+ return ret;
+}
+
+static void hclgevf_pci_uninit(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+
+ pci_iounmap(pdev, hdev->hw.io_base);
+ pci_clear_master(pdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_query_res_cmd *req;
+ struct hclgevf_desc desc;
+ int ret;
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_VF_RSRC, true);
+ ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "query vf resource failed, ret = %d.\n", ret);
+ return ret;
+ }
+
+ req = (struct hclgevf_query_res_cmd *)desc.data;
+
+ if (hnae3_dev_roce_supported(hdev)) {
+ hdev->roce_base_msix_offset =
+ hnae3_get_field(le16_to_cpu(req->msixcap_localid_ba_rocee),
+ HCLGEVF_MSIX_OFT_ROCEE_M,
+ HCLGEVF_MSIX_OFT_ROCEE_S);
+ hdev->num_roce_msix =
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ /* the nic's msix vector count always equals the roce's. */
+ hdev->num_nic_msix = hdev->num_roce_msix;
+
+ /* the VF has both NIC vectors and RoCE vectors; the NIC vectors
+ * are placed before the RoCE vectors. The offset is fixed to 64.
+ */
+ hdev->num_msi = hdev->num_roce_msix +
+ hdev->roce_base_msix_offset;
+ } else {
+ hdev->num_msi =
+ hnae3_get_field(le16_to_cpu(req->vf_intr_vector_number),
+ HCLGEVF_VEC_NUM_M, HCLGEVF_VEC_NUM_S);
+
+ hdev->num_nic_msix = hdev->num_msi;
+ }
+
+ if (hdev->num_nic_msix < HNAE3_MIN_VECTOR_NUM) {
+ dev_err(&hdev->pdev->dev,
+ "Just %u msi resources, not enough for vf(min:2).\n",
+ hdev->num_nic_msix);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void hclgevf_set_default_dev_specs(struct hclgevf_dev *hdev)
+{
+#define HCLGEVF_MAX_NON_TSO_BD_NUM 8U
+
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+
+ ae_dev->dev_specs.max_non_tso_bd_num =
+ HCLGEVF_MAX_NON_TSO_BD_NUM;
+ ae_dev->dev_specs.rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ ae_dev->dev_specs.rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+}
+
+static void hclgevf_parse_dev_specs(struct hclgevf_dev *hdev,
+ struct hclgevf_desc *desc)
+{
+ struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
+ struct hclgevf_dev_specs_0_cmd *req0;
+
+ req0 = (struct hclgevf_dev_specs_0_cmd *)desc[0].data;
+
+ ae_dev->dev_specs.max_non_tso_bd_num = req0->max_non_tso_bd_num;
+ ae_dev->dev_specs.rss_ind_tbl_size =
+ le16_to_cpu(req0->rss_ind_tbl_size);
+ ae_dev->dev_specs.rss_key_size = le16_to_cpu(req0->rss_key_size);
+}
+
+static void hclgevf_check_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hnae3_dev_specs *dev_specs = &hdev->ae_dev->dev_specs;
+
+ if (!dev_specs->max_non_tso_bd_num)
+ dev_specs->max_non_tso_bd_num = HCLGEVF_MAX_NON_TSO_BD_NUM;
+ if (!dev_specs->rss_ind_tbl_size)
+ dev_specs->rss_ind_tbl_size = HCLGEVF_RSS_IND_TBL_SIZE;
+ if (!dev_specs->rss_key_size)
+ dev_specs->rss_key_size = HCLGEVF_RSS_KEY_SIZE;
+}
+
+static int hclgevf_query_dev_specs(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_desc desc[HCLGEVF_QUERY_DEV_SPECS_BD_NUM];
+ int ret;
+ int i;
+
+ /* set default specifications as devices lower than version V3 do not
+ * support querying specifications from firmware.
+ */
+ if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) {
+ hclgevf_set_default_dev_specs(hdev);
+ return 0;
+ }
+
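+ /* chain the descriptors: all but the last one carry the NEXT flag */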
+ for (i = 0; i < HCLGEVF_QUERY_DEV_SPECS_BD_NUM - 1; i++) {
+ hclgevf_cmd_setup_basic_desc(&desc[i],
+ HCLGEVF_OPC_QUERY_DEV_SPECS, true);
+ desc[i].flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_NEXT);
+ }
+ hclgevf_cmd_setup_basic_desc(&desc[i], HCLGEVF_OPC_QUERY_DEV_SPECS,
+ true);
+
+ ret = hclgevf_cmd_send(&hdev->hw, desc, HCLGEVF_QUERY_DEV_SPECS_BD_NUM);
+ if (ret)
+ return ret;
+
+ hclgevf_parse_dev_specs(hdev, desc);
+ hclgevf_check_dev_specs(hdev);
+
+ return 0;
+}
+
+static int hclgevf_pci_reset(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret = 0;
+
+ if ((hdev->reset_type == HNAE3_VF_FULL_RESET ||
+ hdev->reset_type == HNAE3_FLR_RESET) &&
+ test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
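+ /* (re-)initialize MSI/MSI-X and the misc IRQ if the IRQs are not
+ * currently initialized
+ */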
+ if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ pci_set_master(pdev);
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed(%d) to init MSI/MSI-X\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret) {
+ hclgevf_uninit_msi(hdev);
+ dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n",
+ ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ }
+
+ return ret;
+}
+
+static int hclgevf_clear_vport_list(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_HANDLE_VF_TBL,
+ HCLGE_MBX_VPORT_LIST_CLEAR);
+ return hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+}
+
+static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = hclgevf_pci_reset(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "pci reset failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "cmd failed %d\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ return ret;
+ }
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ return ret;
+ }
+
+ set_bit(HCLGEVF_STATE_PROMISC_CHANGED, &hdev->state);
+
+ dev_info(&hdev->pdev->dev, "Reset done\n");
+
+ return 0;
+}
+
+static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
+{
+ struct pci_dev *pdev = hdev->pdev;
+ int ret;
+
+ ret = hclgevf_pci_init(hdev);
+ if (ret)
+ return ret;
+
+ ret = hclgevf_cmd_queue_init(hdev);
+ if (ret)
+ goto err_cmd_queue_init;
+
+ ret = hclgevf_cmd_init(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ /* Get vf resource */
+ ret = hclgevf_query_vf_resource(hdev);
+ if (ret)
+ goto err_cmd_init;
+
+ ret = hclgevf_query_dev_specs(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to query dev specifications, ret = %d\n", ret);
+ goto err_cmd_init;
+ }
+
+ ret = hclgevf_init_msi(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
+ goto err_cmd_init;
+ }
+
+ hclgevf_state_init(hdev);
+ hdev->reset_level = HNAE3_VF_FUNC_RESET;
+ hdev->reset_type = HNAE3_NONE_RESET;
+
+ ret = hclgevf_misc_irq_init(hdev);
+ if (ret)
+ goto err_misc_irq_init;
+
+ set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+
+ ret = hclgevf_configure(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_alloc_tqps(hdev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed(%d) to allocate TQPs\n", ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_set_handle_info(hdev);
+ if (ret)
+ goto err_config;
+
+ ret = hclgevf_config_gro(hdev, true);
+ if (ret)
+ goto err_config;
+
+ /* Initialize RSS for this VF */
+ hclgevf_rss_init_cfg(hdev);
+ ret = hclgevf_rss_init_hw(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize RSS\n", ret);
+ goto err_config;
+ }
+
+ /* ensure the vf tbl list is empty before init */
+ ret = hclgevf_clear_vport_list(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to clear tbl list configuration, ret = %d.\n",
+ ret);
+ goto err_config;
+ }
+
+ ret = hclgevf_init_vlan_config(hdev);
+ if (ret) {
+ dev_err(&hdev->pdev->dev,
+ "failed(%d) to initialize VLAN config\n", ret);
+ goto err_config;
+ }
+
+ hdev->last_reset_time = jiffies;
+ dev_info(&hdev->pdev->dev, "finished initializing %s driver\n",
+ HCLGEVF_DRIVER_NAME);
+
+ hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
+ timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
+
+ return 0;
+
+err_config:
+ hclgevf_misc_irq_uninit(hdev);
+err_misc_irq_init:
+ hclgevf_state_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+err_cmd_init:
+ hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
+ hclgevf_pci_uninit(hdev);
+ clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state);
+ return ret;
+}
+
+static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
+{
+ struct hclge_vf_to_pf_msg send_msg;
+
+ hclgevf_state_uninit(hdev);
+
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_VF_UNINIT, 0);
+ hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+
+ if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
+ hclgevf_misc_irq_uninit(hdev);
+ hclgevf_uninit_msi(hdev);
+ }
+
+ hclgevf_cmd_uninit(hdev);
+ hclgevf_pci_uninit(hdev);
+ hclgevf_uninit_mac_list(hdev);
+}
+
+static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct pci_dev *pdev = ae_dev->pdev;
+ int ret;
+
+ ret = hclgevf_alloc_hdev(ae_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "hclge device allocation failed\n");
+ return ret;
+ }
+
+ ret = hclgevf_init_hdev(ae_dev->priv);
+ if (ret) {
+ dev_err(&pdev->dev, "hclge device initialization failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev)
+{
+ struct hclgevf_dev *hdev = ae_dev->priv;
+
+ hclgevf_uninit_hdev(hdev);
+ ae_dev->priv = NULL;
+}
+
+static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hnae3_knic_private_info *kinfo = &nic->kinfo;
+
+ return min_t(u32, hdev->rss_size_max,
+ hdev->num_tqps / kinfo->num_tc);
+}
+
+/**
+ * hclgevf_get_channels - Get the current channels enabled and max supported.
+ * @handle: hardware information for network interface
+ * @ch: ethtool channels structure
+ *
+ * We don't support separate tx and rx queues as channels. The other count
+ * represents how many queues are being used for control. max_combined counts
+ * how many queue pairs we can support. They may not be mapped 1 to 1 with
+ * q_vectors since we support a lot more queue pairs than q_vectors.
+ **/
+static void hclgevf_get_channels(struct hnae3_handle *handle,
+ struct ethtool_channels *ch)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ ch->max_combined = hclgevf_get_max_channels(hdev);
+ ch->other_count = 0;
+ ch->max_other = 0;
+ ch->combined_count = handle->kinfo.rss_size;
+}
+
+static void hclgevf_get_tqps_and_rss_info(struct hnae3_handle *handle,
+ u16 *alloc_tqps, u16 *max_rss_size)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ *alloc_tqps = hdev->num_tqps;
+ *max_rss_size = hdev->rss_size_max;
+}
+
+static void hclgevf_update_rss_size(struct hnae3_handle *handle,
+ u32 new_tqps_num)
+{
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ u16 max_rss_size;
+
+ kinfo->req_rss_size = new_tqps_num;
+
+ max_rss_size = min_t(u16, hdev->rss_size_max,
+ hdev->num_tqps / kinfo->num_tc);
+
+ /* Use the user's configuration when it is not larger than
+ * max_rss_size, otherwise, use the maximum specification value.
+ */
+ if (kinfo->req_rss_size != kinfo->rss_size && kinfo->req_rss_size &&
+ kinfo->req_rss_size <= max_rss_size)
+ kinfo->rss_size = kinfo->req_rss_size;
+ else if (kinfo->rss_size > max_rss_size ||
+ (!kinfo->req_rss_size && kinfo->rss_size < max_rss_size))
+ kinfo->rss_size = max_rss_size;
+
+ kinfo->num_tqps = kinfo->num_tc * kinfo->rss_size;
+}
+
+static int hclgevf_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
+ bool rxfh_configured)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ struct hnae3_knic_private_info *kinfo = &handle->kinfo;
+ u16 cur_rss_size = kinfo->rss_size;
+ u16 cur_tqps = kinfo->num_tqps;
+ u32 *rss_indir;
+ unsigned int i;
+ int ret;
+
+ hclgevf_update_rss_size(handle, new_tqps_num);
+
+ ret = hclgevf_set_rss_tc_mode(hdev, kinfo->rss_size);
+ if (ret)
+ return ret;
+
+ /* RSS indirection table has been configured by the user */
+ if (rxfh_configured)
+ goto out;
+
+ /* Reinitializes the rss indirect table according to the new RSS size */
+ rss_indir = kcalloc(HCLGEVF_RSS_IND_TBL_SIZE, sizeof(u32), GFP_KERNEL);
+ if (!rss_indir)
+ return -ENOMEM;
+
+ for (i = 0; i < HCLGEVF_RSS_IND_TBL_SIZE; i++)
+ rss_indir[i] = i % kinfo->rss_size;
+
+ hdev->rss_cfg.rss_size = kinfo->rss_size;
+
+ ret = hclgevf_set_rss(handle, rss_indir, NULL, 0);
+ if (ret)
+ dev_err(&hdev->pdev->dev, "set rss indir table fail, ret=%d\n",
+ ret);
+
+ kfree(rss_indir);
+
+out:
+ if (!ret)
+ dev_info(&hdev->pdev->dev,
+ "Channels changed, rss_size from %u to %u, tqps from %u to %u",
+ cur_rss_size, kinfo->rss_size,
+ cur_tqps, kinfo->rss_size * kinfo->num_tc);
+
+ return ret;
+}
+
+static int hclgevf_get_status(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->hw.mac.link;
+}
+
+static void hclgevf_get_ksettings_an_result(struct hnae3_handle *handle,
+ u8 *auto_neg, u32 *speed,
+ u8 *duplex)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (speed)
+ *speed = hdev->hw.mac.speed;
+ if (duplex)
+ *duplex = hdev->hw.mac.duplex;
+ if (auto_neg)
+ *auto_neg = AUTONEG_DISABLE;
+}
+
+void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
+ u8 duplex)
+{
+ hdev->hw.mac.speed = speed;
+ hdev->hw.mac.duplex = duplex;
+}
+
+static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hclgevf_config_gro(hdev, enable);
+}
+
+static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type,
+ u8 *module_type)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ if (media_type)
+ *media_type = hdev->hw.mac.media_type;
+
+ if (module_type)
+ *module_type = hdev->hw.mac.module_type;
+}
+
+static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING);
+}
+
+static bool hclgevf_get_cmdq_stat(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
+}
+
+static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state);
+}
+
+static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ return hdev->rst_stats.hw_rst_done_cnt;
+}
+
+static void hclgevf_get_link_mode(struct hnae3_handle *handle,
+ unsigned long *supported,
+ unsigned long *advertising)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
+ *supported = hdev->hw.mac.supported;
+ *advertising = hdev->hw.mac.advertising;
+}
+
+#define MAX_SEPARATE_NUM 4
+#define SEPARATOR_VALUE 0xFFFFFFFF
+#define REG_NUM_PER_LINE 4
+#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32))
+
+static int hclgevf_get_regs_len(struct hnae3_handle *handle)
+{
+ int cmdq_lines, common_lines, ring_lines, tqp_intr_lines;
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+
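+ /* each register group is padded with separator values up to a full
+ * line, hence the extra line counted per group
+ */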
+ cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1;
+ tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1;
+
+ return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps +
+ tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE;
+}
+
+static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version,
+ void *data)
+{
+ struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+ int i, j, reg_um, separator_num;
+ u32 *reg = data;
+
+ *version = hdev->fw_version;
+
+ /* fetching per-VF register values from the VF PCIe register space */
+ reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(common_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+
+ reg_um = sizeof(ring_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_tqps; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ ring_reg_addr_list[i] +
+ 0x200 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+
+ reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32);
+ separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE;
+ for (j = 0; j < hdev->num_msi_used - 1; j++) {
+ for (i = 0; i < reg_um; i++)
+ *reg++ = hclgevf_read_dev(&hdev->hw,
+ tqp_intr_reg_addr_list[i] +
+ 4 * j);
+ for (i = 0; i < separator_num; i++)
+ *reg++ = SEPARATOR_VALUE;
+ }
+}
+
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size)
+{
+ struct hnae3_handle *nic = &hdev->nic;
+ struct hclge_vf_to_pf_msg send_msg;
+ int ret;
+
+ rtnl_lock();
+
+ if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
+ test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
+ dev_warn(&hdev->pdev->dev,
+ "is resetting when updating port based vlan info\n");
+ rtnl_unlock();
+ return;
+ }
+
+ ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
+ if (ret) {
+ rtnl_unlock();
+ return;
+ }
+
+ /* send msg to the PF to update the port based vlan info */
+ hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
+ HCLGE_MBX_PORT_BASE_VLAN_CFG);
+ memcpy(send_msg.data, port_base_vlan_info, data_size);
+ ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
+ if (!ret) {
+ if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
+ nic->port_base_vlan_state = state;
+ else
+ nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
+ }
+
+ hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
+ rtnl_unlock();
+}
+
+static const struct hnae3_ae_ops hclgevf_ops = {
+ .init_ae_dev = hclgevf_init_ae_dev,
+ .uninit_ae_dev = hclgevf_uninit_ae_dev,
+ .flr_prepare = hclgevf_flr_prepare,
+ .flr_done = hclgevf_flr_done,
+ .init_client_instance = hclgevf_init_client_instance,
+ .uninit_client_instance = hclgevf_uninit_client_instance,
+ .start = hclgevf_ae_start,
+ .stop = hclgevf_ae_stop,
+ .client_start = hclgevf_client_start,
+ .client_stop = hclgevf_client_stop,
+ .map_ring_to_vector = hclgevf_map_ring_to_vector,
+ .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector,
+ .get_vector = hclgevf_get_vector,
+ .put_vector = hclgevf_put_vector,
+ .reset_queue = hclgevf_reset_tqp,
+ .get_mac_addr = hclgevf_get_mac_addr,
+ .set_mac_addr = hclgevf_set_mac_addr,
+ .add_uc_addr = hclgevf_add_uc_addr,
+ .rm_uc_addr = hclgevf_rm_uc_addr,
+ .add_mc_addr = hclgevf_add_mc_addr,
+ .rm_mc_addr = hclgevf_rm_mc_addr,
+ .get_stats = hclgevf_get_stats,
+ .update_stats = hclgevf_update_stats,
+ .get_strings = hclgevf_get_strings,
+ .get_sset_count = hclgevf_get_sset_count,
+ .get_rss_key_size = hclgevf_get_rss_key_size,
+ .get_rss_indir_size = hclgevf_get_rss_indir_size,
+ .get_rss = hclgevf_get_rss,
+ .set_rss = hclgevf_set_rss,
+ .get_rss_tuple = hclgevf_get_rss_tuple,
+ .set_rss_tuple = hclgevf_set_rss_tuple,
+ .get_tc_size = hclgevf_get_tc_size,
+ .get_fw_version = hclgevf_get_fw_version,
+ .set_vlan_filter = hclgevf_set_vlan_filter,
+ .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag,
+ .reset_event = hclgevf_reset_event,
+ .set_default_reset_request = hclgevf_set_def_reset_request,
+ .set_channels = hclgevf_set_channels,
+ .get_channels = hclgevf_get_channels,
+ .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info,
+ .get_regs_len = hclgevf_get_regs_len,
+ .get_regs = hclgevf_get_regs,
+ .get_status = hclgevf_get_status,
+ .get_ksettings_an_result = hclgevf_get_ksettings_an_result,
+ .get_media_type = hclgevf_get_media_type,
+ .get_hw_reset_stat = hclgevf_get_hw_reset_stat,
+ .ae_dev_resetting = hclgevf_ae_dev_resetting,
+ .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
+ .set_gro_en = hclgevf_gro_en,
+ .set_mtu = hclgevf_set_mtu,
+ .get_global_queue_id = hclgevf_get_qid_global,
+ .set_timer_task = hclgevf_set_timer_task,
+ .get_link_mode = hclgevf_get_link_mode,
+ .set_promisc_mode = hclgevf_set_promisc_mode,
+ .request_update_promisc_mode = hclgevf_request_update_promisc_mode,
+ .get_cmdq_stat = hclgevf_get_cmdq_stat,
+};
+
+static struct hnae3_ae_algo ae_algovf = {
+ .ops = &hclgevf_ops,
+ .pdev_id_table = ae_algovf_pci_tbl,
+};
+
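+/* module init: create the hclgevf workqueue and register the algo with hnae3 */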
+static int hclgevf_init(void)
+{
+ pr_info("%s is initializing\n", HCLGEVF_NAME);
+
+ hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME);
+ if (!hclgevf_wq) {
+ pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
+ return -ENOMEM;
+ }
+
+ hnae3_register_ae_algo(&ae_algovf);
+
+ return 0;
+}
+
+static void hclgevf_exit(void)
+{
+ hnae3_unregister_ae_algo(&ae_algovf);
+ destroy_workqueue(hclgevf_wq);
+}
+module_init(hclgevf_init);
+module_exit(hclgevf_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("HCLGEVF Driver");
+MODULE_VERSION(HCLGEVF_MOD_VERSION);
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
new file mode 100644
index 000000000..9469af8c4
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h
@@ -0,0 +1,351 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2016-2017 Hisilicon Limited. */
+
+#ifndef __HCLGEVF_MAIN_H
+#define __HCLGEVF_MAIN_H
+#include <linux/fs.h>
+#include <linux/if_vlan.h>
+#include <linux/types.h>
+#include "hclge_mbx.h"
+#include "hclgevf_cmd.h"
+#include "hnae3.h"
+
+#define HCLGEVF_MOD_VERSION "1.0"
+#define HCLGEVF_DRIVER_NAME "hclgevf"
+
+#define HCLGEVF_MAX_VLAN_ID 4095
+#define HCLGEVF_MISC_VECTOR_NUM 0
+
+#define HCLGEVF_INVALID_VPORT 0xffff
+#define HCLGEVF_GENERAL_TASK_INTERVAL 5
+#define HCLGEVF_KEEP_ALIVE_TASK_INTERVAL 2
+
+/* The actual number depends upon the total number of VFs
+ * created by the physical function, but the maximum number of
+ * possible vectors per VF is {VFn(1-32), VECTn(32 + 1)}.
+ */
+#define HCLGEVF_MAX_VF_VECTOR_NUM (32 + 1)
+
+#define HCLGEVF_VECTOR_REG_BASE 0x20000
+#define HCLGEVF_MISC_VECTOR_REG_BASE 0x20400
+#define HCLGEVF_VECTOR_REG_OFFSET 0x4
+#define HCLGEVF_VECTOR_VF_OFFSET 0x100000
+
+/* bar registers for cmdq */
+#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000
+#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004
+#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008
+#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010
+#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014
+#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018
+#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C
+#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020
+#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024
+#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028
+#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108
+#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C
+
+/* bar registers for common func */
+#define HCLGEVF_GRO_EN_REG 0x28000
+
+/* bar registers for rcb */
+#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000
+#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004
+#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008
+#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C
+#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014
+#define HCLGEVF_RING_RX_TAIL_REG 0x80018
+#define HCLGEVF_RING_RX_HEAD_REG 0x8001C
+#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020
+#define HCLGEVF_RING_RX_OFFSET_REG 0x80024
+#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028
+#define HCLGEVF_RING_RX_STASH_REG 0x80030
+#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034
+#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040
+#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044
+#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048
+#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C
+#define HCLGEVF_RING_TX_TC_REG 0x80050
+#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054
+#define HCLGEVF_RING_TX_TAIL_REG 0x80058
+#define HCLGEVF_RING_TX_HEAD_REG 0x8005C
+#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060
+#define HCLGEVF_RING_TX_OFFSET_REG 0x80064
+#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068
+#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070
+#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074
+#define HCLGEVF_RING_EN_REG 0x80090
+
+/* bar registers for tqp interrupt */
+#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000
+#define HCLGEVF_TQP_INTR_GL0_REG 0x20100
+#define HCLGEVF_TQP_INTR_GL1_REG 0x20200
+#define HCLGEVF_TQP_INTR_GL2_REG 0x20300
+#define HCLGEVF_TQP_INTR_RL_REG 0x20900
+
+/* Vector0 interrupt CMDQ event source register(RW) */
+#define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100
+/* Vector0 interrupt CMDQ event status register(RO) */
+#define HCLGEVF_VECTOR0_CMDQ_STATE_REG 0x27104
+/* CMDQ register bits for RX event(=MBX event) */
+#define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1
+/* RST register bits for RESET event */
+#define HCLGEVF_VECTOR0_RST_INT_B 2
+
+#define HCLGEVF_TQP_RESET_TRY_TIMES 10
+/* Reset related Registers */
+#define HCLGEVF_RST_ING 0x20C00
+#define HCLGEVF_FUN_RST_ING_BIT BIT(0)
+#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5)
+#define HCLGEVF_CORE_RST_ING_BIT BIT(6)
+#define HCLGEVF_IMP_RST_ING_BIT BIT(7)
+#define HCLGEVF_RST_ING_BITS \
+ (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \
+ HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT)
+
+#define HCLGEVF_VF_RST_ING 0x07008
+#define HCLGEVF_VF_RST_ING_BIT BIT(16)
+
+#define HCLGEVF_WAIT_RESET_DONE 100
+
+#define HCLGEVF_RSS_IND_TBL_SIZE 512
+#define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff
+#define HCLGEVF_RSS_KEY_SIZE 40
+#define HCLGEVF_RSS_HASH_ALGO_TOEPLITZ 0
+#define HCLGEVF_RSS_HASH_ALGO_SIMPLE 1
+#define HCLGEVF_RSS_HASH_ALGO_SYMMETRIC 2
+#define HCLGEVF_RSS_HASH_ALGO_MASK 0xf
+#define HCLGEVF_RSS_CFG_TBL_NUM \
+ (HCLGEVF_RSS_IND_TBL_SIZE / HCLGEVF_RSS_CFG_TBL_SIZE)
+#define HCLGEVF_RSS_INPUT_TUPLE_OTHER GENMASK(3, 0)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP GENMASK(4, 0)
+#define HCLGEVF_D_PORT_BIT BIT(0)
+#define HCLGEVF_S_PORT_BIT BIT(1)
+#define HCLGEVF_D_IP_BIT BIT(2)
+#define HCLGEVF_S_IP_BIT BIT(3)
+#define HCLGEVF_V_TAG_BIT BIT(4)
+#define HCLGEVF_RSS_INPUT_TUPLE_SCTP_NO_PORT \
+ (HCLGEVF_D_IP_BIT | HCLGEVF_S_IP_BIT | HCLGEVF_V_TAG_BIT)
+
+#define HCLGEVF_STATS_TIMER_INTERVAL 36U
+
+enum hclgevf_evt_cause {
+ HCLGEVF_VECTOR0_EVENT_RST,
+ HCLGEVF_VECTOR0_EVENT_MBX,
+ HCLGEVF_VECTOR0_EVENT_OTHER,
+};
+
+/* states of hclgevf device & tasks */
+enum hclgevf_states {
+ /* device states */
+ HCLGEVF_STATE_DOWN,
+ HCLGEVF_STATE_DISABLED,
+ HCLGEVF_STATE_IRQ_INITED,
+ HCLGEVF_STATE_REMOVING,
+ HCLGEVF_STATE_NIC_REGISTERED,
+ HCLGEVF_STATE_ROCE_REGISTERED,
+ /* task states */
+ HCLGEVF_STATE_RST_SERVICE_SCHED,
+ HCLGEVF_STATE_RST_HANDLING,
+ HCLGEVF_STATE_MBX_SERVICE_SCHED,
+ HCLGEVF_STATE_MBX_HANDLING,
+ HCLGEVF_STATE_CMD_DISABLE,
+ HCLGEVF_STATE_LINK_UPDATING,
+ HCLGEVF_STATE_PROMISC_CHANGED,
+ HCLGEVF_STATE_RST_FAIL,
+};
+
+struct hclgevf_mac {
+ u8 media_type;
+ u8 module_type;
+ u8 mac_addr[ETH_ALEN];
+ int link;
+ u8 duplex;
+ u32 speed;
+ u64 supported;
+ u64 advertising;
+};
+
+struct hclgevf_hw {
+ void __iomem *io_base;
+ int num_vec;
+ struct hclgevf_cmq cmq;
+ struct hclgevf_mac mac;
+	void *hdev; /* hclgevf device it is part of */
+};
+
+/* TQP stats */
+struct hlcgevf_tqp_stats {
+	/* query_tqp_tx_queue_statistics, opcode id: 0x0B03 */
+ u64 rcb_tx_ring_pktnum_rcd; /* 32bit */
+	/* query_tqp_rx_queue_statistics, opcode id: 0x0B13 */
+ u64 rcb_rx_ring_pktnum_rcd; /* 32bit */
+};
+
+struct hclgevf_tqp {
+ struct device *dev; /* device for DMA mapping */
+ struct hnae3_queue q;
+ struct hlcgevf_tqp_stats tqp_stats;
+ u16 index; /* global index in a NIC controller */
+
+ bool alloced;
+};
+
+struct hclgevf_cfg {
+ u8 vmdq_vport_num;
+ u8 tc_num;
+ u16 tqp_desc_num;
+ u16 rx_buf_len;
+ u8 phy_addr;
+ u8 media_type;
+ u8 mac_addr[ETH_ALEN];
+ u32 numa_node_map;
+};
+
+struct hclgevf_rss_tuple_cfg {
+ u8 ipv4_tcp_en;
+ u8 ipv4_udp_en;
+ u8 ipv4_sctp_en;
+ u8 ipv4_fragment_en;
+ u8 ipv6_tcp_en;
+ u8 ipv6_udp_en;
+ u8 ipv6_sctp_en;
+ u8 ipv6_fragment_en;
+};
+
+struct hclgevf_rss_cfg {
+ u8 rss_hash_key[HCLGEVF_RSS_KEY_SIZE]; /* user configured hash keys */
+ u32 hash_algo;
+ u32 rss_size;
+ u8 hw_tc_map;
+ u8 rss_indirection_tbl[HCLGEVF_RSS_IND_TBL_SIZE]; /* shadow table */
+ struct hclgevf_rss_tuple_cfg rss_tuple_sets;
+};
+
+struct hclgevf_misc_vector {
+ u8 __iomem *addr;
+ int vector_irq;
+ char name[HNAE3_INT_NAME_LEN];
+};
+
+struct hclgevf_rst_stats {
+	u32 rst_cnt;			/* the number of resets */
+	u32 vf_func_rst_cnt;		/* the number of VF function resets */
+	u32 flr_rst_cnt;		/* the number of FLRs */
+	u32 vf_rst_cnt;			/* the number of VF resets */
+	u32 rst_done_cnt;		/* the number of resets completed */
+	u32 hw_rst_done_cnt;		/* the number of HW resets completed */
+	u32 rst_fail_cnt;		/* the number of failed VF resets */
+};
+
+enum HCLGEVF_MAC_ADDR_TYPE {
+ HCLGEVF_MAC_ADDR_UC,
+ HCLGEVF_MAC_ADDR_MC
+};
+
+enum HCLGEVF_MAC_NODE_STATE {
+ HCLGEVF_MAC_TO_ADD,
+ HCLGEVF_MAC_TO_DEL,
+ HCLGEVF_MAC_ACTIVE
+};
+
+struct hclgevf_mac_addr_node {
+ struct list_head node;
+ enum HCLGEVF_MAC_NODE_STATE state;
+ u8 mac_addr[ETH_ALEN];
+};
+
+struct hclgevf_mac_table_cfg {
+	spinlock_t mac_list_lock; /* protect the mac address lists to add/delete */
+ struct list_head uc_mac_list;
+ struct list_head mc_mac_list;
+};
+
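+/* per-PCI-function private context of the VF driver */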
+struct hclgevf_dev {
+ struct pci_dev *pdev;
+ struct hnae3_ae_dev *ae_dev;
+ struct hclgevf_hw hw;
+ struct hclgevf_misc_vector misc_vector;
+ struct hclgevf_rss_cfg rss_cfg;
+ unsigned long state;
+ unsigned long flr_state;
+ unsigned long default_reset_request;
+ unsigned long last_reset_time;
+ enum hnae3_reset_type reset_level;
+ unsigned long reset_pending;
+ enum hnae3_reset_type reset_type;
+ struct timer_list reset_timer;
+
+#define HCLGEVF_RESET_REQUESTED 0
+#define HCLGEVF_RESET_PENDING 1
+ unsigned long reset_state; /* requested, pending */
+ struct hclgevf_rst_stats rst_stats;
+ u32 reset_attempts;
+ struct semaphore reset_sem; /* protect reset process */
+
+ u32 fw_version;
+ u16 num_tqps; /* num task queue pairs of this VF */
+
+ u16 alloc_rss_size; /* allocated RSS task queue */
+ u16 rss_size_max; /* HW defined max RSS task queue */
+
+ u16 num_alloc_vport; /* num vports this driver supports */
+ u32 numa_node_mask;
+ u16 rx_buf_len;
+ u16 num_tx_desc; /* desc num of per tx queue */
+ u16 num_rx_desc; /* desc num of per rx queue */
+ u8 hw_tc_map;
+ u8 has_pf_mac;
+
+ u16 num_msi;
+ u16 num_msi_left;
+ u16 num_msi_used;
+ u16 num_nic_msix; /* Num of nic vectors for this VF */
+ u16 num_roce_msix; /* Num of roce vectors for this VF */
+ u16 roce_base_msix_offset;
+ int roce_base_vector;
+ u32 base_msi_vector;
+ u16 *vector_status;
+ int *vector_irq;
+
+ unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)];
+
+ struct hclgevf_mac_table_cfg mac_table;
+
+ bool mbx_event_pending;
+ struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */
+ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */
+
+ struct delayed_work service_task;
+
+ struct hclgevf_tqp *htqp;
+
+ struct hnae3_handle nic;
+ struct hnae3_handle roce;
+
+ struct hnae3_client *nic_client;
+ struct hnae3_client *roce_client;
+ u32 flag;
+ unsigned long serv_processed_cnt;
+ unsigned long last_serv_processed;
+};
+
+static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev)
+{
+ return !!hdev->reset_pending;
+}
+
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
+ u8 *resp_data, u16 resp_len);
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev);
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev);
+
+void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state);
+void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
+ u8 duplex);
+void hclgevf_reset_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_mbx_task_schedule(struct hclgevf_dev *hdev);
+void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
+ u8 *port_base_vlan_info, u8 data_size);
+#endif
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
new file mode 100644
index 000000000..b8e5ca670
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c
@@ -0,0 +1,351 @@
+// SPDX-License-Identifier: GPL-2.0+
+// Copyright (c) 2016-2017 Hisilicon Limited.
+
+#include "hclge_mbx.h"
+#include "hclgevf_main.h"
+#include "hnae3.h"
+
+#define CREATE_TRACE_POINTS
+#include "hclgevf_trace.h"
+
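+/* convert the PF's mailbox response code into a negative errno (0 on success) */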
+static int hclgevf_resp_to_errno(u16 resp_code)
+{
+ return resp_code ? -resp_code : 0;
+}
+
+static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev)
+{
+	/* this function should be called with mbx_resp.mbx_mutex held
+	 * to protect the received_resp flag from racing
+	 */
+ hdev->mbx_resp.received_resp = false;
+ hdev->mbx_resp.origin_mbx_msg = 0;
+ hdev->mbx_resp.resp_status = 0;
+ memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE);
+}
+
+/* hclgevf_get_mbx_resp: used to get a response from PF after VF sends a mailbox
+ * message to PF.
+ * @hdev: pointer to struct hclgevf_dev
+ * @code0: the message opcode the VF sent to the PF
+ * @code1: the message subcode the VF sent to the PF
+ * @resp_data: pointer to store the response data from the PF
+ * @resp_len: the length of the resp_data array.
+ */
+static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
+ u8 *resp_data, u16 resp_len)
+{
+#define HCLGEVF_MAX_TRY_TIMES 500
+#define HCLGEVF_SLEEP_USECOND 1000
+ struct hclgevf_mbx_resp_status *mbx_resp;
+ u16 r_code0, r_code1;
+ int i = 0;
+
+ if (resp_len > HCLGE_MBX_MAX_RESP_DATA_SIZE) {
+ dev_err(&hdev->pdev->dev,
+ "VF mbx response len(=%u) exceeds maximum(=%u)\n",
+ resp_len,
+ HCLGE_MBX_MAX_RESP_DATA_SIZE);
+ return -EINVAL;
+ }
+
+ while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state))
+ return -EIO;
+
+ usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2);
+ i++;
+ }
+
+ if (i >= HCLGEVF_MAX_TRY_TIMES) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
+ code0, code1, hdev->mbx_resp.received_resp, i);
+ return -EIO;
+ }
+
+ mbx_resp = &hdev->mbx_resp;
+ r_code0 = (u16)(mbx_resp->origin_mbx_msg >> 16);
+ r_code1 = (u16)(mbx_resp->origin_mbx_msg & 0xff);
+
+ if (mbx_resp->resp_status)
+ return mbx_resp->resp_status;
+
+ if (resp_data)
+ memcpy(resp_data, &mbx_resp->additional_info[0], resp_len);
+
+ hclgevf_reset_mbx_resp_status(hdev);
+
+ if (!(r_code0 == code0 && r_code1 == code1 && !mbx_resp->resp_status)) {
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp code(code0=%u,code1=%u), %d\n",
+ code0, code1, mbx_resp->resp_status);
+ dev_err(&hdev->pdev->dev,
+ "VF could not match resp r_code(r_code0=%u,r_code1=%u)\n",
+ r_code0, r_code1);
+ return -EIO;
+ }
+
+ return 0;
+}
+
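+/* hclgevf_send_mbx_msg: send a mailbox message from VF to PF.
+ * When need_resp is set the call is synchronous: mbx_resp.mbx_mutex
+ * serializes the exchange and up to resp_len bytes of the PF reply are
+ * copied into resp_data.
+ */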
+int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev,
+ struct hclge_vf_to_pf_msg *send_msg, bool need_resp,
+ u8 *resp_data, u16 resp_len)
+{
+ struct hclge_mbx_vf_to_pf_cmd *req;
+ struct hclgevf_desc desc;
+ int status;
+
+ req = (struct hclge_mbx_vf_to_pf_cmd *)desc.data;
+
+ if (!send_msg) {
+ dev_err(&hdev->pdev->dev,
+ "failed to send mbx, msg is NULL\n");
+ return -EINVAL;
+ }
+
+ hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_MBX_VF_TO_PF, false);
+ if (need_resp)
+ hnae3_set_bit(req->mbx_need_resp, HCLGE_MBX_NEED_RESP_B, 1);
+
+ memcpy(&req->msg, send_msg, sizeof(struct hclge_vf_to_pf_msg));
+
+ if (test_bit(HCLGEVF_STATE_NIC_REGISTERED, &hdev->state))
+ trace_hclge_vf_mbx_send(hdev, req);
+
+ /* synchronous send */
+ if (need_resp) {
+ mutex_lock(&hdev->mbx_resp.mbx_mutex);
+ hclgevf_reset_mbx_resp_status(hdev);
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ return status;
+ }
+
+ status = hclgevf_get_mbx_resp(hdev, send_msg->code,
+ send_msg->subcode, resp_data,
+ resp_len);
+ mutex_unlock(&hdev->mbx_resp.mbx_mutex);
+ } else {
+ /* asynchronous send */
+ status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
+ if (status) {
+ dev_err(&hdev->pdev->dev,
+ "VF failed(=%d) to send mbx message to PF\n",
+ status);
+ return status;
+ }
+ }
+
+ return status;
+}
+
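+/* return true when the CRQ tail equals next_to_use, i.e. no new mailbox
+ * descriptors are pending
+ */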
+static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw)
+{
+ u32 tail = hclgevf_read_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG);
+
+ return tail == hw->cmq.crq.next_to_use;
+}
+
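+/* drain the mailbox CRQ: record synchronous PF responses immediately and
+ * queue asynchronous events (link status/mode, reset assertion, VLAN and
+ * promisc pushes) on the ARQ for the mailbox task
+ */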
+void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
+{
+ struct hclgevf_mbx_resp_status *resp;
+ struct hclge_mbx_pf_to_vf_cmd *req;
+ struct hclgevf_cmq_ring *crq;
+ struct hclgevf_desc *desc;
+ u16 *msg_q;
+ u16 flag;
+ u8 *temp;
+ int i;
+
+ resp = &hdev->mbx_resp;
+ crq = &hdev->hw.cmq.crq;
+
+ while (!hclgevf_cmd_crq_empty(&hdev->hw)) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev, "vf crq need init\n");
+ return;
+ }
+
+ desc = &crq->desc[crq->next_to_use];
+ req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data;
+
+ flag = le16_to_cpu(crq->desc[crq->next_to_use].flag);
+ if (unlikely(!hnae3_get_bit(flag, HCLGEVF_CMDQ_RX_OUTVLD_B))) {
+ dev_warn(&hdev->pdev->dev,
+ "dropped invalid mailbox message, code = %u\n",
+ req->msg.code);
+
+ /* dropping/not processing this invalid message */
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ continue;
+ }
+
+ trace_hclge_vf_mbx_get(hdev, req);
+
+		/* synchronous messages are time critical and need preferential
+		 * treatment. Therefore, we need to acknowledge all the sync
+		 * responses as quickly as possible so that waiting tasks do not
+		 * time out, and simultaneously queue the async messages for
+		 * later processing in the context of the mailbox task, i.e. the
+		 * slow path.
+		 */
+ switch (req->msg.code) {
+ case HCLGE_MBX_PF_VF_RESP:
+ if (resp->received_resp)
+ dev_warn(&hdev->pdev->dev,
+ "VF mbx resp flag not clear(%u)\n",
+ req->msg.vf_mbx_msg_code);
+ resp->received_resp = true;
+
+ resp->origin_mbx_msg =
+ (req->msg.vf_mbx_msg_code << 16);
+ resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode;
+ resp->resp_status =
+ hclgevf_resp_to_errno(req->msg.resp_status);
+
+ temp = (u8 *)req->msg.resp_data;
+ for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) {
+ resp->additional_info[i] = *temp;
+ temp++;
+ }
+ break;
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ case HCLGE_MBX_ASSERTING_RESET:
+ case HCLGE_MBX_LINK_STAT_MODE:
+ case HCLGE_MBX_PUSH_VLAN_INFO:
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+			/* set this mbx event as pending. This is required as we
+			 * might lose the interrupt event when the mbx task is
+			 * busy handling it. This shall be cleared when the mbx
+			 * task just enters the handling state.
+			 */
+ hdev->mbx_event_pending = true;
+
+ /* we will drop the async msg if we find ARQ as full
+ * and continue with next message
+ */
+ if (atomic_read(&hdev->arq.count) >=
+ HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+ dev_warn(&hdev->pdev->dev,
+ "Async Q full, dropping msg(%u)\n",
+ req->msg.code);
+ break;
+ }
+
+ /* tail the async message in arq */
+ msg_q = hdev->arq.msg_q[hdev->arq.tail];
+ memcpy(&msg_q[0], &req->msg,
+ HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
+ hclge_mbx_tail_ptr_move_arq(hdev->arq);
+ atomic_inc(&hdev->arq.count);
+
+ hclgevf_mbx_task_schedule(hdev);
+
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "VF received unsupported(%u) mbx msg from PF\n",
+ req->msg.code);
+ break;
+ }
+ crq->desc[crq->next_to_use].flag = 0;
+ hclge_mbx_ring_ptr_move_crq(crq);
+ }
+
+	/* Write back CMDQ_RQ header pointer, M7 needs this pointer */
+ hclgevf_write_dev(&hdev->hw, HCLGEVF_NIC_CRQ_HEAD_REG,
+ crq->next_to_use);
+}
+
+static void hclgevf_parse_promisc_info(struct hclgevf_dev *hdev,
+ u16 promisc_info)
+{
+ if (!promisc_info)
+ dev_info(&hdev->pdev->dev,
+			 "Promisc mode has been disabled by the host because the VF is untrusted.\n");
+}
+
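+/* process the asynchronous messages queued on the ARQ by hclgevf_mbx_handler */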
+void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
+{
+ enum hnae3_reset_type reset_type;
+ u16 link_status, state;
+ u16 *msg_q, *vlan_info;
+ u8 duplex;
+ u32 speed;
+ u32 tail;
+ u8 idx;
+
+	/* we can safely clear it now as we are at the start of the async
+	 * message processing
+	 */
+ hdev->mbx_event_pending = false;
+
+ tail = hdev->arq.tail;
+
+ /* process all the async queue messages */
+ while (tail != hdev->arq.head) {
+ if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
+ dev_info(&hdev->pdev->dev,
+ "vf crq need init in async\n");
+ return;
+ }
+
+ msg_q = hdev->arq.msg_q[hdev->arq.head];
+
+ switch (msg_q[0]) {
+ case HCLGE_MBX_LINK_STAT_CHANGE:
+ link_status = msg_q[1];
+ memcpy(&speed, &msg_q[2], sizeof(speed));
+ duplex = (u8)msg_q[4];
+
+			/* update upper layer with new link status */
+ hclgevf_update_link_status(hdev, link_status);
+ hclgevf_update_speed_duplex(hdev, speed, duplex);
+
+ break;
+ case HCLGE_MBX_LINK_STAT_MODE:
+ idx = (u8)msg_q[1];
+ if (idx)
+ memcpy(&hdev->hw.mac.supported, &msg_q[2],
+ sizeof(unsigned long));
+ else
+ memcpy(&hdev->hw.mac.advertising, &msg_q[2],
+ sizeof(unsigned long));
+ break;
+ case HCLGE_MBX_ASSERTING_RESET:
+ /* PF has asserted reset hence VF should go in pending
+ * state and poll for the hardware reset status till it
+ * has been completely reset. After this stack should
+ * eventually be re-initialized.
+ */
+ reset_type = (enum hnae3_reset_type)msg_q[1];
+ set_bit(reset_type, &hdev->reset_pending);
+ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state);
+ hclgevf_reset_task_schedule(hdev);
+
+ break;
+ case HCLGE_MBX_PUSH_VLAN_INFO:
+ state = msg_q[1];
+ vlan_info = &msg_q[1];
+ hclgevf_update_port_base_vlan_info(hdev, state,
+ (u8 *)vlan_info, 8);
+ break;
+ case HCLGE_MBX_PUSH_PROMISC_INFO:
+ hclgevf_parse_promisc_info(hdev, msg_q[1]);
+ break;
+ default:
+ dev_err(&hdev->pdev->dev,
+ "fetched unsupported(%u) message from arq\n",
+ msg_q[0]);
+ break;
+ }
+
+ hclge_mbx_head_ptr_move_arq(hdev->arq);
+ atomic_dec(&hdev->arq.count);
+ msg_q = hdev->arq.msg_q[hdev->arq.head];
+ }
+}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
new file mode 100644
index 000000000..e4bfb6191
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_trace.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* Copyright (c) 2018-2019 Hisilicon Limited. */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hns3
+
+#if !defined(_HCLGEVF_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _HCLGEVF_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#define VF_GET_MBX_LEN (sizeof(struct hclge_mbx_pf_to_vf_cmd) / sizeof(u32))
+#define VF_SEND_MBX_LEN (sizeof(struct hclge_mbx_vf_to_pf_cmd) / sizeof(u32))
+
+TRACE_EVENT(hclge_vf_mbx_get,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_pf_to_vf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u16, code)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_GET_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->dest_vfid;
+ __entry->code = req->msg.code;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_pf_to_vf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code,
+ __print_array(__entry->mbx_data, VF_GET_MBX_LEN, sizeof(u32))
+ )
+);
+
+TRACE_EVENT(hclge_vf_mbx_send,
+ TP_PROTO(
+ struct hclgevf_dev *hdev,
+ struct hclge_mbx_vf_to_pf_cmd *req),
+ TP_ARGS(hdev, req),
+
+ TP_STRUCT__entry(
+ __field(u8, vfid)
+ __field(u8, code)
+ __field(u8, subcode)
+ __string(pciname, pci_name(hdev->pdev))
+ __string(devname, &hdev->nic.kinfo.netdev->name)
+ __array(u32, mbx_data, VF_SEND_MBX_LEN)
+ ),
+
+ TP_fast_assign(
+ __entry->vfid = req->mbx_src_vfid;
+ __entry->code = req->msg.code;
+ __entry->subcode = req->msg.subcode;
+ __assign_str(pciname, pci_name(hdev->pdev));
+ __assign_str(devname, &hdev->nic.kinfo.netdev->name);
+ memcpy(__entry->mbx_data, req,
+ sizeof(struct hclge_mbx_vf_to_pf_cmd));
+ ),
+
+ TP_printk(
+ "%s %s vfid:%u code:%u subcode:%u data:%s",
+ __get_str(pciname), __get_str(devname), __entry->vfid,
+ __entry->code, __entry->subcode,
+ __print_array(__entry->mbx_data, VF_SEND_MBX_LEN, sizeof(u32))
+ )
+);
+
+#endif /* _HCLGEVF_TRACE_H_ */
+
+/* This must be outside ifdef _HCLGEVF_TRACE_H */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE hclgevf_trace
+#include <trace/define_trace.h>
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
new file mode 100644
index 000000000..883d0d7c6
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -0,0 +1,574 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (c) 2014-2015 Hisilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <linux/errno.h>
+#include <linux/etherdevice.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/of_address.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_platform.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define MDIO_DRV_NAME "Hi-HNS_MDIO"
+#define MDIO_BUS_NAME "Hisilicon MII Bus"
+
+#define MDIO_TIMEOUT 1000000
+
+struct hns_mdio_sc_reg {
+ u16 mdio_clk_en;
+ u16 mdio_clk_dis;
+ u16 mdio_reset_req;
+ u16 mdio_reset_dreq;
+ u16 mdio_clk_st;
+ u16 mdio_reset_st;
+};
+
+struct hns_mdio_device {
+ u8 __iomem *vbase; /* mdio reg base address */
+ struct regmap *subctrl_vbase;
+ struct hns_mdio_sc_reg sc_reg;
+};
+
+/* mdio reg */
+#define MDIO_COMMAND_REG 0x0
+#define MDIO_ADDR_REG 0x4
+#define MDIO_WDATA_REG 0x8
+#define MDIO_RDATA_REG 0xc
+#define MDIO_STA_REG 0x10
+
+/* cfg phy bit map */
+#define MDIO_CMD_DEVAD_M 0x1f
+#define MDIO_CMD_DEVAD_S 0
+#define MDIO_CMD_PRTAD_M 0x1f
+#define MDIO_CMD_PRTAD_S 5
+#define MDIO_CMD_OP_S 10
+#define MDIO_CMD_ST_S 12
+#define MDIO_CMD_START_B 14
+
+#define MDIO_ADDR_DATA_M 0xffff
+#define MDIO_ADDR_DATA_S 0
+
+#define MDIO_WDATA_DATA_M 0xffff
+#define MDIO_WDATA_DATA_S 0
+
+#define MDIO_RDATA_DATA_M 0xffff
+#define MDIO_RDATA_DATA_S 0
+
+#define MDIO_STATE_STA_B 0
+
+enum mdio_st_clause {
+ MDIO_ST_CLAUSE_45 = 0,
+ MDIO_ST_CLAUSE_22
+};
+
+enum mdio_c22_op_seq {
+ MDIO_C22_WRITE = 1,
+ MDIO_C22_READ = 2
+};
+
+enum mdio_c45_op_seq {
+ MDIO_C45_WRITE_ADDR = 0,
+ MDIO_C45_WRITE_DATA,
+ MDIO_C45_READ_INCREMENT,
+ MDIO_C45_READ
+};
+
+/* peri subctrl reg */
+#define MDIO_SC_CLK_EN 0x338
+#define MDIO_SC_CLK_DIS 0x33C
+#define MDIO_SC_RESET_REQ 0xA38
+#define MDIO_SC_RESET_DREQ 0xA3C
+#define MDIO_SC_CLK_ST 0x531C
+#define MDIO_SC_RESET_ST 0x5A1C
+
+static void mdio_write_reg(u8 __iomem *base, u32 reg, u32 value)
+{
+ writel_relaxed(value, base + reg);
+}
+
+#define MDIO_WRITE_REG(a, reg, value) \
+ mdio_write_reg((a)->vbase, (reg), (value))
+
+static u32 mdio_read_reg(u8 __iomem *base, u32 reg)
+{
+ return readl_relaxed(base + reg);
+}
+
+#define mdio_set_field(origin, mask, shift, val) \
+ do { \
+ (origin) &= (~((mask) << (shift))); \
+ (origin) |= (((val) & (mask)) << (shift)); \
+ } while (0)
+
+#define mdio_get_field(origin, mask, shift) (((origin) >> (shift)) & (mask))
+
+static void mdio_set_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift,
+ u32 val)
+{
+ u32 origin = mdio_read_reg(base, reg);
+
+ mdio_set_field(origin, mask, shift, val);
+ mdio_write_reg(base, reg, origin);
+}
+
+#define MDIO_SET_REG_FIELD(dev, reg, mask, shift, val) \
+ mdio_set_reg_field((dev)->vbase, (reg), (mask), (shift), (val))
+
+static u32 mdio_get_reg_field(u8 __iomem *base, u32 reg, u32 mask, u32 shift)
+{
+ u32 origin;
+
+ origin = mdio_read_reg(base, reg);
+ return mdio_get_field(origin, mask, shift);
+}
+
+#define MDIO_GET_REG_FIELD(dev, reg, mask, shift) \
+ mdio_get_reg_field((dev)->vbase, (reg), (mask), (shift))
+
+#define MDIO_GET_REG_BIT(dev, reg, bit) \
+ mdio_get_reg_field((dev)->vbase, (reg), 0x1ull, (bit))
+
+#define MDIO_CHECK_SET_ST 1
+#define MDIO_CHECK_CLR_ST 0
+
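+/* write set_val to a subctrl cfg register, then poll st_reg until the masked
+ * status matches check_st or MDIO_TIMEOUT iterations elapse
+ */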
+static int mdio_sc_cfg_reg_write(struct hns_mdio_device *mdio_dev,
+ u32 cfg_reg, u32 set_val,
+ u32 st_reg, u32 st_msk, u8 check_st)
+{
+ u32 time_cnt;
+ u32 reg_value;
+ int ret;
+
+ regmap_write(mdio_dev->subctrl_vbase, cfg_reg, set_val);
+
+ for (time_cnt = MDIO_TIMEOUT; time_cnt; time_cnt--) {
+ ret = regmap_read(mdio_dev->subctrl_vbase, st_reg, &reg_value);
+ if (ret)
+ return ret;
+
+ reg_value &= st_msk;
+ if ((!!check_st) == (!!reg_value))
+ break;
+ }
+
+ if ((!!check_st) != (!!reg_value))
+ return -EBUSY;
+
+ return 0;
+}
+
+static int hns_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = bus->priv;
+ u32 cmd_reg_value;
+ int i;
+
+	/* waiting for MDIO_COMMAND_REG's mdio_start == 0 */
+	/* after that a read or write can be issued */
+ for (i = 0; i < MDIO_TIMEOUT; i++) {
+ cmd_reg_value = MDIO_GET_REG_BIT(mdio_dev,
+ MDIO_COMMAND_REG,
+ MDIO_CMD_START_B);
+ if (!cmd_reg_value)
+ break;
+ }
+ if ((i == MDIO_TIMEOUT) && cmd_reg_value)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
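+/* compose the MDIO command word (clause type, opcode, PHY address and
+ * register/device address), set the start bit and write it to
+ * MDIO_COMMAND_REG to kick off the transaction
+ */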
+static void hns_mdio_cmd_write(struct hns_mdio_device *mdio_dev,
+ u8 is_c45, u8 op, u8 phy_id, u16 cmd)
+{
+ u32 cmd_reg_value;
+ u8 st = is_c45 ? MDIO_ST_CLAUSE_45 : MDIO_ST_CLAUSE_22;
+
+ cmd_reg_value = st << MDIO_CMD_ST_S;
+ cmd_reg_value |= op << MDIO_CMD_OP_S;
+ cmd_reg_value |=
+ (phy_id & MDIO_CMD_PRTAD_M) << MDIO_CMD_PRTAD_S;
+ cmd_reg_value |= (cmd & MDIO_CMD_DEVAD_M) << MDIO_CMD_DEVAD_S;
+ cmd_reg_value |= 1 << MDIO_CMD_START_B;
+
+ MDIO_WRITE_REG(mdio_dev, MDIO_COMMAND_REG, cmd_reg_value);
+}
+
+/**
+ * hns_mdio_write - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ * @data: register value
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_write(struct mii_bus *bus,
+ int phy_id, int regnum, u16 data)
+{
+ int ret;
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ u8 devad = ((regnum >> 16) & 0x1f);
+ u8 is_c45 = !!(regnum & MII_ADDR_C45);
+ u16 reg = (u16)(regnum & 0xffff);
+ u8 op;
+ u16 cmd_reg_cfg;
+
+ dev_dbg(&bus->dev, "mdio write %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x, write data=%d\n",
+ phy_id, is_c45, devad, reg, data);
+
+ /* wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ if (!is_c45) {
+ cmd_reg_cfg = reg;
+ op = MDIO_C22_WRITE;
+ } else {
+		/* config the cmd-reg to write the address */
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+		/* check that the address write operation has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+		/* config the data to be written */
+ cmd_reg_cfg = devad;
+ op = MDIO_C45_WRITE_DATA;
+ }
+
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_WDATA_REG, MDIO_WDATA_DATA_M,
+ MDIO_WDATA_DATA_S, data);
+
+ hns_mdio_cmd_write(mdio_dev, is_c45, op, phy_id, cmd_reg_cfg);
+
+ return 0;
+}
+
+/**
+ * hns_mdio_read - access phy register
+ * @bus: mdio bus
+ * @phy_id: phy id
+ * @regnum: register num
+ *
+ * Return phy register value
+ */
+static int hns_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{
+ int ret;
+ u16 reg_val = 0;
+ u8 devad = ((regnum >> 16) & 0x1f);
+ u8 is_c45 = !!(regnum & MII_ADDR_C45);
+ u16 reg = (u16)(regnum & 0xffff);
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+
+ dev_dbg(&bus->dev, "mdio read %s,base is %p\n",
+ bus->id, mdio_dev->vbase);
+ dev_dbg(&bus->dev, "phy id=%d, is_c45=%d, devad=%d, reg=%#x!\n",
+ phy_id, is_c45, devad, reg);
+
+ /* Step 1: wait for ready */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ if (!is_c45) {
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C22_READ, phy_id, reg);
+ } else {
+ MDIO_SET_REG_FIELD(mdio_dev, MDIO_ADDR_REG, MDIO_ADDR_DATA_M,
+ MDIO_ADDR_DATA_S, reg);
+
+		/* Step 2: config the cmd-reg to write the address */
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_WRITE_ADDR, phy_id, devad);
+
+		/* Step 3: check that the address write operation has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ hns_mdio_cmd_write(mdio_dev, is_c45,
+ MDIO_C45_READ, phy_id, devad);
+ }
+
+	/* Step 5: waiting for MDIO_COMMAND_REG's mdio_start == 0, */
+	/* i.e. the read or write operation has finished */
+ ret = hns_mdio_wait_ready(bus);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO bus is busy\n");
+ return ret;
+ }
+
+ reg_val = MDIO_GET_REG_BIT(mdio_dev, MDIO_STA_REG, MDIO_STATE_STA_B);
+ if (reg_val) {
+ dev_err(&bus->dev, " ERROR! MDIO Read failed!\n");
+ return -EBUSY;
+ }
+
+	/* Step 6: get out the data */
+ reg_val = (u16)MDIO_GET_REG_FIELD(mdio_dev, MDIO_RDATA_REG,
+ MDIO_RDATA_DATA_M, MDIO_RDATA_DATA_S);
+
+ return reg_val;
+}
+
+/**
+ * hns_mdio_reset - reset mdio bus
+ * @bus: mdio bus
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_reset(struct mii_bus *bus)
+{
+ struct hns_mdio_device *mdio_dev = (struct hns_mdio_device *)bus->priv;
+ const struct hns_mdio_sc_reg *sc_reg;
+ int ret;
+
+ if (dev_of_node(bus->parent)) {
+ if (!mdio_dev->subctrl_vbase) {
+			dev_err(&bus->dev, "mdio sys ctl reg has not been mapped\n");
+ return -ENODEV;
+ }
+
+ sc_reg = &mdio_dev->sc_reg;
+ /* 1. reset req, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_req,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO reset fail\n");
+ return ret;
+ }
+
+ /* 2. dis clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_dis,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+ dev_err(&bus->dev, "MDIO dis clk fail\n");
+ return ret;
+ }
+
+ /* 3. reset dreq, and read reset st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_reset_dreq,
+ 0x1, sc_reg->mdio_reset_st, 0x1,
+ MDIO_CHECK_CLR_ST);
+ if (ret) {
+			dev_err(&bus->dev, "MDIO reset dreq fail\n");
+ return ret;
+ }
+
+ /* 4. en clk, and read clk st check */
+ ret = mdio_sc_cfg_reg_write(mdio_dev, sc_reg->mdio_clk_en,
+ 0x1, sc_reg->mdio_clk_st, 0x1,
+ MDIO_CHECK_SET_ST);
+ if (ret)
+ dev_err(&bus->dev, "MDIO en clk fail\n");
+ } else if (is_acpi_node(bus->parent->fwnode)) {
+ acpi_status s;
+
+ s = acpi_evaluate_object(ACPI_HANDLE(bus->parent),
+ "_RST", NULL, NULL);
+ if (ACPI_FAILURE(s)) {
+ dev_err(&bus->dev, "Reset failed, return:%#x\n", s);
+ ret = -EBUSY;
+ } else {
+ ret = 0;
+ }
+ } else {
+ dev_err(&bus->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
+ return ret;
+}
+
+/**
+ * hns_mdio_probe - probe mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_probe(struct platform_device *pdev)
+{
+ struct hns_mdio_device *mdio_dev;
+ struct mii_bus *new_bus;
+ int ret = -ENODEV;
+
+ if (!pdev) {
+ dev_err(NULL, "pdev is NULL!\r\n");
+ return -ENODEV;
+ }
+
+ mdio_dev = devm_kzalloc(&pdev->dev, sizeof(*mdio_dev), GFP_KERNEL);
+ if (!mdio_dev)
+ return -ENOMEM;
+
+ new_bus = devm_mdiobus_alloc(&pdev->dev);
+ if (!new_bus) {
+ dev_err(&pdev->dev, "mdiobus_alloc fail!\n");
+ return -ENOMEM;
+ }
+
+ new_bus->name = MDIO_BUS_NAME;
+ new_bus->read = hns_mdio_read;
+ new_bus->write = hns_mdio_write;
+ new_bus->reset = hns_mdio_reset;
+ new_bus->priv = mdio_dev;
+ new_bus->parent = &pdev->dev;
+
+ mdio_dev->vbase = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(mdio_dev->vbase)) {
+ ret = PTR_ERR(mdio_dev->vbase);
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, new_bus);
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%s", "Mii",
+ dev_name(&pdev->dev));
+ if (dev_of_node(&pdev->dev)) {
+ struct of_phandle_args reg_args;
+
+ ret = of_parse_phandle_with_fixed_args(pdev->dev.of_node,
+ "subctrl-vbase",
+ 4,
+ 0,
+ &reg_args);
+ if (!ret) {
+ mdio_dev->subctrl_vbase =
+ syscon_node_to_regmap(reg_args.np);
+ if (IS_ERR(mdio_dev->subctrl_vbase)) {
+ dev_warn(&pdev->dev, "syscon_node_to_regmap error\n");
+ mdio_dev->subctrl_vbase = NULL;
+ } else {
+ if (reg_args.args_count == 4) {
+ mdio_dev->sc_reg.mdio_clk_en =
+ (u16)reg_args.args[0];
+ mdio_dev->sc_reg.mdio_clk_dis =
+ (u16)reg_args.args[0] + 4;
+ mdio_dev->sc_reg.mdio_reset_req =
+ (u16)reg_args.args[1];
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ (u16)reg_args.args[1] + 4;
+ mdio_dev->sc_reg.mdio_clk_st =
+ (u16)reg_args.args[2];
+ mdio_dev->sc_reg.mdio_reset_st =
+ (u16)reg_args.args[3];
+ } else {
+				/* for compatibility */
+ mdio_dev->sc_reg.mdio_clk_en =
+ MDIO_SC_CLK_EN;
+ mdio_dev->sc_reg.mdio_clk_dis =
+ MDIO_SC_CLK_DIS;
+ mdio_dev->sc_reg.mdio_reset_req =
+ MDIO_SC_RESET_REQ;
+ mdio_dev->sc_reg.mdio_reset_dreq =
+ MDIO_SC_RESET_DREQ;
+ mdio_dev->sc_reg.mdio_clk_st =
+ MDIO_SC_CLK_ST;
+ mdio_dev->sc_reg.mdio_reset_st =
+ MDIO_SC_RESET_ST;
+ }
+ }
+ } else {
+ dev_warn(&pdev->dev, "find syscon ret = %#x\n", ret);
+ mdio_dev->subctrl_vbase = NULL;
+ }
+
+ ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
+ } else if (is_acpi_node(pdev->dev.fwnode)) {
+ /* Clear all the IRQ properties */
+ memset(new_bus->irq, PHY_POLL, 4 * PHY_MAX_ADDR);
+
+ /* Mask out all PHYs from auto probing. */
+ new_bus->phy_mask = ~0;
+
+ /* Register the MDIO bus */
+ ret = mdiobus_register(new_bus);
+ } else {
+ dev_err(&pdev->dev, "Can not get cfg data from DT or ACPI\n");
+ ret = -ENXIO;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register as MDIO bus!\n");
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * hns_mdio_remove - remove mdio device
+ * @pdev: mdio platform device
+ *
+ * Return 0 on success, negative on failure
+ */
+static int hns_mdio_remove(struct platform_device *pdev)
+{
+ struct mii_bus *bus;
+
+ bus = platform_get_drvdata(pdev);
+
+ mdiobus_unregister(bus);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id hns_mdio_match[] = {
+ {.compatible = "hisilicon,mdio"},
+ {.compatible = "hisilicon,hns-mdio"},
+ {}
+};
+MODULE_DEVICE_TABLE(of, hns_mdio_match);
+
+static const struct acpi_device_id hns_mdio_acpi_match[] = {
+ { "HISI0141", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hns_mdio_acpi_match);
+
+static struct platform_driver hns_mdio_driver = {
+ .probe = hns_mdio_probe,
+ .remove = hns_mdio_remove,
+ .driver = {
+ .name = MDIO_DRV_NAME,
+ .of_match_table = hns_mdio_match,
+ .acpi_match_table = ACPI_PTR(hns_mdio_acpi_match),
+ },
+};
+
+module_platform_driver(hns_mdio_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
+MODULE_DESCRIPTION("Hisilicon HNS MDIO driver");
+MODULE_ALIAS("platform:" MDIO_DRV_NAME);