author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/net/ethernet/mediatek
parent    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/mediatek')
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig            |   36
-rw-r--r--  drivers/net/ethernet/mediatek/Makefile           |   13
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c     |  287
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c      | 5240
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h      | 1444
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c          | 1104
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h          |  405
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c  |  194
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c  |  635
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_regs.h     |  174
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c    | 1754
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c          | 1974
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.h          |  169
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_debugfs.c  |  267
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_mcu.c      |  395
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_ops.c      |    8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_regs.h     |  470
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.c       |  503
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_wo.h       |  282
19 files changed, 15354 insertions, 0 deletions
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig
new file mode 100644
index 0000000000..da0db417ab
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/Kconfig
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NET_VENDOR_MEDIATEK
+ bool "MediaTek devices"
+ depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST
+ help
+ If you have a Mediatek SoC with ethernet, say Y.
+
+if NET_VENDOR_MEDIATEK
+
+config NET_MEDIATEK_SOC_WED
+ depends on ARCH_MEDIATEK || COMPILE_TEST
+ def_bool NET_MEDIATEK_SOC != n
+
+config NET_MEDIATEK_SOC
+ tristate "MediaTek SoC Gigabit Ethernet support"
+ depends on NET_DSA || !NET_DSA
+ select PINCTRL
+ select PHYLINK
+ select DIMLIB
+ select PAGE_POOL
+ select PAGE_POOL_STATS
+ select PCS_MTK_LYNXI
+ select REGMAP_MMIO
+ help
+ This driver supports the gigabit ethernet MACs in the
+ MediaTek SoC family.
+
+config NET_MEDIATEK_STAR_EMAC
+ tristate "MediaTek STAR Ethernet MAC support"
+ select PHYLIB
+ select REGMAP_MMIO
+ help
+ This driver supports the ethernet MAC IP first used on
+ MediaTek MT85** SoCs.
+
+endif #NET_VENDOR_MEDIATEK
diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile
new file mode 100644
index 0000000000..03e008fbc8
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the MediaTek SoCs' built-in ethernet MACs
+#
+
+obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o
+mtk_eth-y := mtk_eth_soc.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o mtk_wed_mcu.o mtk_wed_wo.o
+ifdef CONFIG_DEBUG_FS
+mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o
+endif
+obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o
+obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c
new file mode 100644
index 0000000000..7c27a19c4d
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-2019 MediaTek Inc.
+
+/* A library for configuring path from GMAC/GDM to target PHY
+ *
+ * Author: Sean Wang <sean.wang@mediatek.com>
+ *
+ */
+
+#include <linux/phy.h>
+#include <linux/regmap.h>
+
+#include "mtk_eth_soc.h"
+
+struct mtk_eth_muxc {
+ const char *name;
+ int cap_bit;
+ int (*set_path)(struct mtk_eth *eth, u64 path);
+};
+
+static const char *mtk_eth_path_name(u64 path)
+{
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ return "gmac1_rgmii";
+ case MTK_ETH_PATH_GMAC1_TRGMII:
+ return "gmac1_trgmii";
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ return "gmac1_sgmii";
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ return "gmac2_rgmii";
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ return "gmac2_sgmii";
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ return "gmac2_gephy";
+ case MTK_ETH_PATH_GDM1_ESW:
+ return "gdm1_esw";
+ default:
+ return "unknown path";
+ }
+}
+
+static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, u64 path)
+{
+ bool updated = true;
+ u32 mask, set, reg;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = 0;
+ break;
+ case MTK_ETH_PATH_GDM1_ESW:
+ mask = ~(u32)MTK_MUX_TO_ESW;
+ set = MTK_MUX_TO_ESW;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (mtk_is_netsys_v3_or_greater(eth))
+ reg = MTK_MAC_MISC_V3;
+ else
+ reg = MTK_MAC_MISC;
+
+ if (updated)
+ mtk_m32(eth, mask, set, reg);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, u64 path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val = ~(u32)GEPHY_MAC_SEL;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, u64 path)
+{
+ unsigned int val = 0, mask = 0, reg = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_U3_COPHY_V2)) {
+ reg = USB_PHY_SWITCH_REG;
+ val = SGMII_QPHY_SEL;
+ mask = QPHY_SEL_MASK;
+ } else {
+ reg = INFRA_MISC2;
+ val = CO_QPHY_SEL;
+ mask = val;
+ }
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->infra, reg, mask, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, u64 path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val = SYSCFG0_SGMII_GMAC1;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val = SYSCFG0_SGMII_GMAC2;
+ break;
+ case MTK_ETH_PATH_GMAC1_RGMII:
+ case MTK_ETH_PATH_GMAC2_RGMII:
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= SYSCFG0_SGMII_MASK;
+
+ if ((path == MTK_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) ||
+ (path == MTK_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2))
+ val = 0;
+ else
+ updated = false;
+ break;
+ default:
+ updated = false;
+ break;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, u64 path)
+{
+ unsigned int val = 0;
+ bool updated = true;
+
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ switch (path) {
+ case MTK_ETH_PATH_GMAC1_SGMII:
+ val |= SYSCFG0_SGMII_GMAC1_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_GEPHY:
+ val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ case MTK_ETH_PATH_GMAC2_SGMII:
+ val |= SYSCFG0_SGMII_GMAC2_V2;
+ break;
+ default:
+ updated = false;
+ }
+
+ if (updated)
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, val);
+
+ dev_dbg(eth->dev, "path %s in %s updated = %d\n",
+ mtk_eth_path_name(path), __func__, updated);
+
+ return 0;
+}
+
+static const struct mtk_eth_muxc mtk_eth_muxc[] = {
+ {
+ .name = "mux_gdm1_to_gmac1_esw",
+ .cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW,
+ .set_path = set_mux_gdm1_to_gmac1_esw,
+ }, {
+ .name = "mux_gmac2_gmac0_to_gephy",
+ .cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY,
+ .set_path = set_mux_gmac2_gmac0_to_gephy,
+ }, {
+ .name = "mux_u3_gmac2_to_qphy",
+ .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY,
+ .set_path = set_mux_u3_gmac2_to_qphy,
+ }, {
+ .name = "mux_gmac1_gmac2_to_sgmii_rgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII,
+ .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii,
+ }, {
+ .name = "mux_gmac12_to_gephy_sgmii",
+ .cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII,
+ .set_path = set_mux_gmac12_to_gephy_sgmii,
+ },
+};
+
+static int mtk_eth_mux_setup(struct mtk_eth *eth, u64 path)
+{
+ int i, err = 0;
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, path)) {
+ dev_err(eth->dev, "path %s isn't support on the SoC\n",
+ mtk_eth_path_name(path));
+ return -EINVAL;
+ }
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX))
+ return 0;
+
+ /* Setup MUX in path fabric */
+ for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) {
+ err = mtk_eth_muxc[i].set_path(eth, path);
+ if (err)
+ goto out;
+ } else {
+ dev_dbg(eth->dev, "mux %s isn't present on the SoC\n",
+ mtk_eth_muxc[i].name);
+ }
+ }
+
+out:
+ return err;
+}
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII :
+ MTK_ETH_PATH_GMAC2_SGMII;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path = 0;
+
+ if (mac_id == 1)
+ path = MTK_ETH_PATH_GMAC2_GEPHY;
+
+ if (!path)
+ return -EINVAL;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id)
+{
+ u64 path;
+
+ path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_RGMII :
+ MTK_ETH_PATH_GMAC2_RGMII;
+
+ /* Setup proper MUXes along the path */
+ return mtk_eth_mux_setup(eth, path);
+}
+
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
new file mode 100644
index 0000000000..20afe79f38
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -0,0 +1,5240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/if_vlan.h>
+#include <linux/reset.h>
+#include <linux/tcp.h>
+#include <linux/interrupt.h>
+#include <linux/pinctrl/devinfo.h>
+#include <linux/phylink.h>
+#include <linux/pcs/pcs-mtk-lynxi.h>
+#include <linux/jhash.h>
+#include <linux/bitfield.h>
+#include <net/dsa.h>
+#include <net/dst_metadata.h>
+#include <net/page_pool/helpers.h>
+
+#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
+
+static int mtk_msg_level = -1;
+module_param_named(msg_level, mtk_msg_level, int, 0);
+MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)");
+
+#define MTK_ETHTOOL_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, x) / sizeof(u64) }
+
+#define MTK_ETHTOOL_XDP_STAT(x) { #x, \
+ offsetof(struct mtk_hw_stats, xdp_stats.x) / \
+ sizeof(u64) }
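+
+/* Illustrative note (editor's addition, not part of the upstream driver):
+ * both helpers record a counter's offset inside struct mtk_hw_stats in
+ * units of u64 words, e.g. MTK_ETHTOOL_STAT(tx_bytes) expands to
+ * { "tx_bytes", offsetof(struct mtk_hw_stats, tx_bytes) / sizeof(u64) },
+ * so the value can later be fetched as ((u64 *)hw_stats)[offset].
+ */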
+
+static const struct mtk_reg_map mtk_reg_map = {
+ .tx_irq_mask = 0x1a1c,
+ .tx_irq_status = 0x1a18,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .adma_rx_dbg0 = 0x0a38,
+ .int_grp = 0x0a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x1800,
+ .qtx_sch = 0x1804,
+ .rx_ptr = 0x1900,
+ .rx_cnt_cfg = 0x1904,
+ .qcrx_ptr = 0x1908,
+ .glo_cfg = 0x1a04,
+ .rst_idx = 0x1a08,
+ .delay_irq = 0x1a0c,
+ .fc_th = 0x1a10,
+ .tx_sch_rate = 0x1a14,
+ .int_grp = 0x1a20,
+ .hred = 0x1a44,
+ .ctx_ptr = 0x1b00,
+ .dtx_ptr = 0x1b04,
+ .crx_ptr = 0x1b10,
+ .drx_ptr = 0x1b14,
+ .fq_head = 0x1b20,
+ .fq_tail = 0x1b24,
+ .fq_count = 0x1b28,
+ .fq_blen = 0x1b2c,
+ },
+ .gdm1_cnt = 0x2400,
+ .gdma_to_ppe = 0x4444,
+ .ppe_base = 0x0c00,
+ .wdma_base = {
+ [0] = 0x2800,
+ [1] = 0x2c00,
+ },
+ .pse_iq_sta = 0x0110,
+ .pse_oq_sta = 0x0118,
+};
+
+static const struct mtk_reg_map mt7628_reg_map = {
+ .tx_irq_mask = 0x0a28,
+ .tx_irq_status = 0x0a20,
+ .pdma = {
+ .rx_ptr = 0x0900,
+ .rx_cnt_cfg = 0x0904,
+ .pcrx_ptr = 0x0908,
+ .glo_cfg = 0x0a04,
+ .rst_idx = 0x0a08,
+ .delay_irq = 0x0a0c,
+ .irq_status = 0x0a20,
+ .irq_mask = 0x0a28,
+ .int_grp = 0x0a50,
+ },
+};
+
+static const struct mtk_reg_map mt7986_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6100,
+ .rx_cnt_cfg = 0x6104,
+ .pcrx_ptr = 0x6108,
+ .glo_cfg = 0x6204,
+ .rst_idx = 0x6208,
+ .delay_irq = 0x620c,
+ .irq_status = 0x6220,
+ .irq_mask = 0x6228,
+ .adma_rx_dbg0 = 0x6238,
+ .int_grp = 0x6250,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+};
+
+static const struct mtk_reg_map mt7988_reg_map = {
+ .tx_irq_mask = 0x461c,
+ .tx_irq_status = 0x4618,
+ .pdma = {
+ .rx_ptr = 0x6900,
+ .rx_cnt_cfg = 0x6904,
+ .pcrx_ptr = 0x6908,
+ .glo_cfg = 0x6a04,
+ .rst_idx = 0x6a08,
+ .delay_irq = 0x6a0c,
+ .irq_status = 0x6a20,
+ .irq_mask = 0x6a28,
+ .adma_rx_dbg0 = 0x6a38,
+ .int_grp = 0x6a50,
+ },
+ .qdma = {
+ .qtx_cfg = 0x4400,
+ .qtx_sch = 0x4404,
+ .rx_ptr = 0x4500,
+ .rx_cnt_cfg = 0x4504,
+ .qcrx_ptr = 0x4508,
+ .glo_cfg = 0x4604,
+ .rst_idx = 0x4608,
+ .delay_irq = 0x460c,
+ .fc_th = 0x4610,
+ .int_grp = 0x4620,
+ .hred = 0x4644,
+ .ctx_ptr = 0x4700,
+ .dtx_ptr = 0x4704,
+ .crx_ptr = 0x4710,
+ .drx_ptr = 0x4714,
+ .fq_head = 0x4720,
+ .fq_tail = 0x4724,
+ .fq_count = 0x4728,
+ .fq_blen = 0x472c,
+ .tx_sch_rate = 0x4798,
+ },
+ .gdm1_cnt = 0x1c00,
+ .gdma_to_ppe = 0x3333,
+ .ppe_base = 0x2000,
+ .wdma_base = {
+ [0] = 0x4800,
+ [1] = 0x4c00,
+ },
+ .pse_iq_sta = 0x0180,
+ .pse_oq_sta = 0x01a0,
+};
+
+/* strings used by ethtool */
+static const struct mtk_ethtool_stats {
+ char str[ETH_GSTRING_LEN];
+ u32 offset;
+} mtk_ethtool_stats[] = {
+ MTK_ETHTOOL_STAT(tx_bytes),
+ MTK_ETHTOOL_STAT(tx_packets),
+ MTK_ETHTOOL_STAT(tx_skip),
+ MTK_ETHTOOL_STAT(tx_collisions),
+ MTK_ETHTOOL_STAT(rx_bytes),
+ MTK_ETHTOOL_STAT(rx_packets),
+ MTK_ETHTOOL_STAT(rx_overflow),
+ MTK_ETHTOOL_STAT(rx_fcs_errors),
+ MTK_ETHTOOL_STAT(rx_short_errors),
+ MTK_ETHTOOL_STAT(rx_long_errors),
+ MTK_ETHTOOL_STAT(rx_checksum_errors),
+ MTK_ETHTOOL_STAT(rx_flow_control_packets),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_pass),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_drop),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx),
+ MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit),
+ MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors),
+};
+
+static const char * const mtk_clks_source_name[] = {
+ "ethif",
+ "sgmiitop",
+ "esw",
+ "gp0",
+ "gp1",
+ "gp2",
+ "gp3",
+ "xgp1",
+ "xgp2",
+ "xgp3",
+ "crypto",
+ "fe",
+ "trgpll",
+ "sgmii_tx250m",
+ "sgmii_rx250m",
+ "sgmii_cdr_ref",
+ "sgmii_cdr_fb",
+ "sgmii2_tx250m",
+ "sgmii2_rx250m",
+ "sgmii2_cdr_ref",
+ "sgmii2_cdr_fb",
+ "sgmii_ck",
+ "eth2pll",
+ "wocpu0",
+ "wocpu1",
+ "netsys0",
+ "netsys1",
+ "ethwarp_wocpu2",
+ "ethwarp_wocpu1",
+ "ethwarp_wocpu0",
+ "top_usxgmii0_sel",
+ "top_usxgmii1_sel",
+ "top_sgm0_sel",
+ "top_sgm1_sel",
+ "top_xfi_phy0_xtal_sel",
+ "top_xfi_phy1_xtal_sel",
+ "top_eth_gmii_sel",
+ "top_eth_refck_50m_sel",
+ "top_eth_sys_200m_sel",
+ "top_eth_sys_sel",
+ "top_eth_xgmii_sel",
+ "top_eth_mii_sel",
+ "top_netsys_sel",
+ "top_netsys_500m_sel",
+ "top_netsys_pao_2x_sel",
+ "top_netsys_sync_250m_sel",
+ "top_netsys_ppefb_250m_sel",
+ "top_netsys_warp_sel",
+};
+
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
+{
+ __raw_writel(val, eth->base + reg);
+}
+
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg)
+{
+ return __raw_readl(eth->base + reg);
+}
+
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg)
+{
+ u32 val;
+
+ val = mtk_r32(eth, reg);
+ val &= ~mask;
+ val |= set;
+ mtk_w32(eth, val, reg);
+ return reg;
+}
+
+static int mtk_mdio_busy_wait(struct mtk_eth *eth)
+{
+ unsigned long t_start = jiffies;
+
+ while (1) {
+ if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS))
+ return 0;
+ if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT))
+ break;
+ cond_resched();
+ }
+
+ dev_err(eth->dev, "mdio: MDIO timeout\n");
+ return -ETIMEDOUT;
+}
+
+static int _mtk_mdio_write_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg,
+ u32 write_data)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int _mtk_mdio_write_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg, u32 write_data)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
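+ /* Clause 45 access is a two-step sequence: first latch the target
+ * register address in the given MMD (devad), then issue the data cycle.
+ */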
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_WRITE |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(write_data),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int _mtk_mdio_read_c22(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C22 |
+ PHY_IAC_CMD_C22_READ |
+ PHY_IAC_REG(phy_reg) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
+}
+
+static int _mtk_mdio_read_c45(struct mtk_eth *eth, u32 phy_addr,
+ u32 devad, u32 phy_reg)
+{
+ int ret;
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
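+ /* Same two-step Clause 45 sequence as the write path: address cycle
+ * first, then a read cycle that returns the data in MTK_PHY_IAC.
+ */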
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_ADDR |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr) |
+ PHY_IAC_DATA(phy_reg),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ mtk_w32(eth, PHY_IAC_ACCESS |
+ PHY_IAC_START_C45 |
+ PHY_IAC_CMD_C45_READ |
+ PHY_IAC_REG(devad) |
+ PHY_IAC_ADDR(phy_addr),
+ MTK_PHY_IAC);
+
+ ret = mtk_mdio_busy_wait(eth);
+ if (ret < 0)
+ return ret;
+
+ return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK;
+}
+
+static int mtk_mdio_write_c22(struct mii_bus *bus, int phy_addr,
+ int phy_reg, u16 val)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_write_c22(eth, phy_addr, phy_reg, val);
+}
+
+static int mtk_mdio_write_c45(struct mii_bus *bus, int phy_addr,
+ int devad, int phy_reg, u16 val)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_write_c45(eth, phy_addr, devad, phy_reg, val);
+}
+
+static int mtk_mdio_read_c22(struct mii_bus *bus, int phy_addr, int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c22(eth, phy_addr, phy_reg);
+}
+
+static int mtk_mdio_read_c45(struct mii_bus *bus, int phy_addr, int devad,
+ int phy_reg)
+{
+ struct mtk_eth *eth = bus->priv;
+
+ return _mtk_mdio_read_c45(eth, phy_addr, devad, phy_reg);
+}
+
+static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ u32 val;
+
+ val = (interface == PHY_INTERFACE_MODE_TRGMII) ?
+ ETHSYS_TRGMII_MT7621_DDR_PLL : 0;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0,
+ ETHSYS_TRGMII_MT7621_MASK, val);
+
+ return 0;
+}
+
+static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
+ phy_interface_t interface)
+{
+ int ret;
+
+ if (interface == PHY_INTERFACE_MODE_TRGMII) {
+ mtk_w32(eth, TRGMII_MODE, INTF_MODE);
+ ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], 500000000);
+ if (ret)
+ dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
+ return;
+ }
+
+ dev_err(eth->dev, "Missing PLL configuration, ethernet may not work\n");
+}
+
+static void mtk_setup_bridge_switch(struct mtk_eth *eth)
+{
+ /* Force Port1 XGMAC Link Up */
+ mtk_m32(eth, 0, MTK_XGMAC_FORCE_LINK(MTK_GMAC1_ID),
+ MTK_XGMAC_STS(MTK_GMAC1_ID));
+
+ /* Adjust GSW bridge IPG to 11 */
+ mtk_m32(eth, GSWTX_IPG_MASK | GSWRX_IPG_MASK,
+ (GSW_IPG_11 << GSWTX_IPG_SHIFT) |
+ (GSW_IPG_11 << GSWRX_IPG_SHIFT),
+ MTK_GSW_CFG);
+}
+
+static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ unsigned int sid;
+
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface)) {
+ sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ?
+ 0 : mac->id;
+
+ return eth->sgmii_pcs[sid];
+ }
+
+ return NULL;
+}
+
+static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ int val, ge_mode, err = 0;
+ u32 i;
+
+ /* MT76x8 has no hardware settings between the MAC and the PHY */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ mac->interface != state->interface) {
+ /* Setup soc pin functions */
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_TRGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_MII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) {
+ err = mtk_gmac_rgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_1000BASEX:
+ case PHY_INTERFACE_MODE_2500BASEX:
+ case PHY_INTERFACE_MODE_SGMII:
+ err = mtk_gmac_sgmii_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) {
+ err = mtk_gmac_gephy_path_setup(eth, mac->id);
+ if (err)
+ goto init_err;
+ }
+ break;
+ case PHY_INTERFACE_MODE_INTERNAL:
+ break;
+ default:
+ goto err_phy;
+ }
+
+ /* Setup clock for 1st gmac */
+ if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII &&
+ !phy_interface_mode_is_8023z(state->interface) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) {
+ if (MTK_HAS_CAPS(mac->hw->soc->caps,
+ MTK_TRGMII_MT7621_CLK)) {
+ if (mt7621_gmac0_rgmii_adjust(mac->hw,
+ state->interface))
+ goto err_phy;
+ } else {
+ mtk_gmac0_rgmii_adjust(mac->hw,
+ state->interface);
+
+ /* mt7623_pad_clk_setup */
+ for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
+ mtk_w32(mac->hw,
+ TD_DM_DRVP(8) | TD_DM_DRVN(8),
+ TRGMII_TD_ODT(i));
+
+ /* Assert/release MT7623 RXC reset */
+ mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL,
+ TRGMII_RCK_CTRL);
+ mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL);
+ }
+ }
+
+ switch (state->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ ge_mode = 1;
+ break;
+ default:
+ ge_mode = 0;
+ break;
+ }
+
+ /* put the gmac into the right mode */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+ val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id);
+ val |= SYSCFG0_GE_MODE(ge_mode, mac->id);
+ regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val);
+
+ mac->interface = state->interface;
+ }
+
+ /* SGMII */
+ if (state->interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(state->interface)) {
+ /* The path from GMAC to SGMII will be enabled once the SGMIISYS
+ * setup is done.
+ */
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val);
+
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK,
+ ~(u32)SYSCFG0_SGMII_MASK);
+
+ /* Save the syscfg0 value for mac_finish */
+ mac->syscfg0 = val;
+ } else if (phylink_autoneg_inband(mode)) {
+ dev_err(eth->dev,
+ "In-band mode not supported in non SGMII mode!\n");
+ return;
+ }
+
+ /* Setup gmac */
+ if (mtk_is_netsys_v3_or_greater(eth) &&
+ mac->interface == PHY_INTERFACE_MODE_INTERNAL) {
+ mtk_w32(mac->hw, MTK_GDMA_XGDM_SEL, MTK_GDMA_EG_CTRL(mac->id));
+ mtk_w32(mac->hw, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(mac->id));
+
+ mtk_setup_bridge_switch(eth);
+ }
+
+ return;
+
+err_phy:
+ dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__,
+ mac->id, phy_modes(state->interface));
+ return;
+
+init_err:
+ dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__,
+ mac->id, phy_modes(state->interface), err);
+}
+
+static int mtk_mac_finish(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ /* Enable SGMII */
+ if (interface == PHY_INTERFACE_MODE_SGMII ||
+ phy_interface_mode_is_8023z(interface))
+ regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0,
+ SYSCFG0_SGMII_MASK, mac->syscfg0);
+
+ /* Setup gmac */
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur;
+ mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE |
+ MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK |
+ MAC_MCR_RX_FIFO_CLR_DIS;
+
+ /* Only update control register when needed! */
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+
+ return 0;
+}
+
+static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+
+ mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN);
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+}
+
+static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
+ int speed)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 ofs, val;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ return;
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (mtk_is_netsys_v1(eth))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+
+ if (IS_ENABLED(CONFIG_SOC_MT7621)) {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 2) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 105) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_10:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 4) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_100:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
+ break;
+ case SPEED_1000:
+ val |= MTK_QTX_SCH_MAX_RATE_EN |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+ FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
+ break;
+ default:
+ break;
+ }
+ }
+
+ ofs = MTK_QTX_OFFSET * idx;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+}
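+
+/* Editor's sketch (assumption, not part of the upstream driver): the
+ * MAN/EXP pairs programmed above appear to encode a rate in kbit/s as
+ * man * 10^exp, which matches the "minimum: 10 Mbps" comment
+ * (1 * 10^4 kbit/s) and the generic SPEED_1000 case (10 * 10^5 kbit/s).
+ */
+static inline u32 mtk_qtx_sch_rate_kbps(u32 man, u32 exp)
+{
+ u32 rate = man;
+
+ while (exp--)
+ rate *= 10;
+
+ return rate;
+}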
+
+static void mtk_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex, bool tx_pause, bool rx_pause)
+{
+ struct mtk_mac *mac = container_of(config, struct mtk_mac,
+ phylink_config);
+ u32 mcr;
+
+ mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 |
+ MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC |
+ MAC_MCR_FORCE_RX_FC);
+
+ /* Configure speed */
+ mac->speed = speed;
+ switch (speed) {
+ case SPEED_2500:
+ case SPEED_1000:
+ mcr |= MAC_MCR_SPEED_1000;
+ break;
+ case SPEED_100:
+ mcr |= MAC_MCR_SPEED_100;
+ break;
+ }
+
+ /* Configure duplex */
+ if (duplex == DUPLEX_FULL)
+ mcr |= MAC_MCR_FORCE_DPX;
+
+ /* Configure pause modes - phylink will avoid these for half duplex */
+ if (tx_pause)
+ mcr |= MAC_MCR_FORCE_TX_FC;
+ if (rx_pause)
+ mcr |= MAC_MCR_FORCE_RX_FC;
+
+ mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN;
+ mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id));
+}
+
+static const struct phylink_mac_ops mtk_phylink_ops = {
+ .mac_select_pcs = mtk_mac_select_pcs,
+ .mac_config = mtk_mac_config,
+ .mac_finish = mtk_mac_finish,
+ .mac_link_down = mtk_mac_link_down,
+ .mac_link_up = mtk_mac_link_up,
+};
+
+static int mtk_mdio_init(struct mtk_eth *eth)
+{
+ unsigned int max_clk = 2500000, divider;
+ struct device_node *mii_np;
+ int ret;
+ u32 val;
+
+ mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
+ if (!mii_np) {
+ dev_err(eth->dev, "no %s child node found", "mdio-bus");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(mii_np)) {
+ ret = -ENODEV;
+ goto err_put_node;
+ }
+
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
+ if (!eth->mii_bus) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ eth->mii_bus->name = "mdio";
+ eth->mii_bus->read = mtk_mdio_read_c22;
+ eth->mii_bus->write = mtk_mdio_write_c22;
+ eth->mii_bus->read_c45 = mtk_mdio_read_c45;
+ eth->mii_bus->write_c45 = mtk_mdio_write_c45;
+ eth->mii_bus->priv = eth;
+ eth->mii_bus->parent = eth->dev;
+
+ snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np);
+
+ if (!of_property_read_u32(mii_np, "clock-frequency", &val)) {
+ if (val > MDC_MAX_FREQ || val < MDC_MAX_FREQ / MDC_MAX_DIVIDER) {
+ dev_err(eth->dev, "MDIO clock frequency out of range");
+ ret = -EINVAL;
+ goto err_put_node;
+ }
+ max_clk = val;
+ }
+ divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+
+ /* Configure MDC Turbo Mode */
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+
+ /* Configure MDC Divider */
+ val = FIELD_PREP(PPSC_MDC_CFG, divider);
+ if (!mtk_is_netsys_v3_or_greater(eth))
+ val |= PPSC_MDC_TURBO;
+ mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+
+ dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
+
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
+
+err_put_node:
+ of_node_put(mii_np);
+ return ret;
+}
+
+static void mtk_mdio_cleanup(struct mtk_eth *eth)
+{
+ if (!eth->mii_bus)
+ return;
+
+ mdiobus_unregister(eth->mii_bus);
+}
+
+static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->tx_irq_lock, flags);
+ val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask);
+ spin_unlock_irqrestore(&eth->tx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask)
+{
+ unsigned long flags;
+ u32 val;
+
+ spin_lock_irqsave(&eth->rx_irq_lock, flags);
+ val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask);
+ mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask);
+ spin_unlock_irqrestore(&eth->rx_irq_lock, flags);
+}
+
+static int mtk_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret = eth_mac_addr(dev, p);
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ const char *macaddr = dev->dev_addr;
+
+ if (ret)
+ return ret;
+
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ spin_lock_bh(&mac->hw->page_lock);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MT7628_SDM_MAC_ADRH);
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MT7628_SDM_MAC_ADRL);
+ } else {
+ mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1],
+ MTK_GDMA_MAC_ADRH(mac->id));
+ mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) |
+ (macaddr[4] << 8) | macaddr[5],
+ MTK_GDMA_MAC_ADRL(mac->id));
+ }
+ spin_unlock_bh(&mac->hw->page_lock);
+
+ return 0;
+}
+
+void mtk_stats_update_mac(struct mtk_mac *mac)
+{
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+
+ u64_stats_update_begin(&hw_stats->syncp);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT);
+ hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT);
+ hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT);
+ hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, MT7628_SDM_CS_ERR);
+ } else {
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ unsigned int offs = hw_stats->reg_offset;
+ u64 stats;
+
+ hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs);
+ if (stats)
+ hw_stats->rx_bytes += (stats << 32);
+ hw_stats->rx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs);
+ hw_stats->rx_overflow +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs);
+ hw_stats->rx_fcs_errors +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs);
+ hw_stats->rx_short_errors +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs);
+ hw_stats->rx_long_errors +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs);
+ hw_stats->rx_checksum_errors +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs);
+ hw_stats->rx_flow_control_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x50 + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x54 + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x40 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x44 + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x48 + offs);
+ } else {
+ hw_stats->tx_skip +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs);
+ hw_stats->tx_collisions +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs);
+ hw_stats->tx_bytes +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs);
+ stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs);
+ if (stats)
+ hw_stats->tx_bytes += (stats << 32);
+ hw_stats->tx_packets +=
+ mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs);
+ }
+ }
+
+ u64_stats_update_end(&hw_stats->syncp);
+}
+
+static void mtk_stats_update(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->mac[i] || !eth->mac[i]->hw_stats)
+ continue;
+ if (spin_trylock(&eth->mac[i]->hw_stats->stats_lock)) {
+ mtk_stats_update_mac(eth->mac[i]);
+ spin_unlock(&eth->mac[i]->hw_stats->stats_lock);
+ }
+ }
+}
+
+static void mtk_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *storage)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ unsigned int start;
+
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock_bh(&hw_stats->stats_lock)) {
+ mtk_stats_update_mac(mac);
+ spin_unlock_bh(&hw_stats->stats_lock);
+ }
+ }
+
+ do {
+ start = u64_stats_fetch_begin(&hw_stats->syncp);
+ storage->rx_packets = hw_stats->rx_packets;
+ storage->tx_packets = hw_stats->tx_packets;
+ storage->rx_bytes = hw_stats->rx_bytes;
+ storage->tx_bytes = hw_stats->tx_bytes;
+ storage->collisions = hw_stats->tx_collisions;
+ storage->rx_length_errors = hw_stats->rx_short_errors +
+ hw_stats->rx_long_errors;
+ storage->rx_over_errors = hw_stats->rx_overflow;
+ storage->rx_crc_errors = hw_stats->rx_fcs_errors;
+ storage->rx_errors = hw_stats->rx_checksum_errors;
+ storage->tx_aborted_errors = hw_stats->tx_skip;
+ } while (u64_stats_fetch_retry(&hw_stats->syncp, start));
+
+ storage->tx_errors = dev->stats.tx_errors;
+ storage->rx_dropped = dev->stats.rx_dropped;
+ storage->tx_dropped = dev->stats.tx_dropped;
+}
+
+static inline int mtk_max_frag_size(int mtu)
+{
+ /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */
+ if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K)
+ mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
+
+ return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+}
+
+static inline int mtk_max_buf_size(int frag_size)
+{
+ int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN -
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K);
+
+ return buf_size;
+}
+
+static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd,
+ struct mtk_rx_dma_v2 *dma_rxd)
+{
+ rxd->rxd2 = READ_ONCE(dma_rxd->rxd2);
+ if (!(rxd->rxd2 & RX_DMA_DONE))
+ return false;
+
+ rxd->rxd1 = READ_ONCE(dma_rxd->rxd1);
+ rxd->rxd3 = READ_ONCE(dma_rxd->rxd3);
+ rxd->rxd4 = READ_ONCE(dma_rxd->rxd4);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ rxd->rxd5 = READ_ONCE(dma_rxd->rxd5);
+ rxd->rxd6 = READ_ONCE(dma_rxd->rxd6);
+ }
+
+ return true;
+}
+
+static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask)
+{
+ unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH);
+ unsigned long data;
+
+ data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN,
+ get_order(size));
+
+ return (void *)data;
+}
+
+/* the qdma core needs scratch memory to be setup */
+static int mtk_init_fq_dma(struct mtk_eth *eth)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ dma_addr_t phy_ring_tail;
+ int cnt = MTK_QDMA_RING_SIZE;
+ dma_addr_t dma_addr;
+ int i;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM))
+ eth->scratch_ring = eth->sram_base;
+ else
+ eth->scratch_ring = dma_alloc_coherent(eth->dma_dev,
+ cnt * soc->txrx.txd_size,
+ &eth->phy_scratch_ring,
+ GFP_KERNEL);
+ if (unlikely(!eth->scratch_ring))
+ return -ENOMEM;
+
+ eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL);
+ if (unlikely(!eth->scratch_head))
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr)))
+ return -ENOMEM;
+
+ phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1);
+
+ for (i = 0; i < cnt; i++) {
+ struct mtk_tx_dma_v2 *txd;
+
+ txd = eth->scratch_ring + i * soc->txrx.txd_size;
+ txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE;
+ if (i < cnt - 1)
+ txd->txd2 = eth->phy_scratch_ring +
+ (i + 1) * soc->txrx.txd_size;
+
+ txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE);
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
+ }
+
+ mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head);
+ mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail);
+ mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count);
+ mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen);
+
+ return 0;
+}
+
+static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc)
+{
+ return ring->dma + (desc - ring->phys);
+}
+
+static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
+ void *txd, u32 txd_size)
+{
+ int idx = (txd - ring->dma) / txd_size;
+
+ return &ring->buf[idx];
+}
+
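+/* Map a descriptor in the QDMA ring to the entry at the same offset in
+ * the parallel PDMA descriptor ring used on SoCs without QDMA support.
+ */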
+static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring,
+ struct mtk_tx_dma *dma)
+{
+ return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma;
+}
+
+static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size)
+{
+ return (dma - ring->dma) / txd_size;
+}
+
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct xdp_frame_bulk *bq, bool napi)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
+ dma_unmap_single(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
+ dma_unmap_page(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+ } else {
+ if (dma_unmap_len(tx_buf, dma_len0)) {
+ dma_unmap_page(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr0),
+ dma_unmap_len(tx_buf, dma_len0),
+ DMA_TO_DEVICE);
+ }
+
+ if (dma_unmap_len(tx_buf, dma_len1)) {
+ dma_unmap_page(eth->dma_dev,
+ dma_unmap_addr(tx_buf, dma_addr1),
+ dma_unmap_len(tx_buf, dma_len1),
+ DMA_TO_DEVICE);
+ }
+ }
+
+ if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB) {
+ struct sk_buff *skb = tx_buf->data;
+
+ if (napi)
+ napi_consume_skb(skb, napi);
+ else
+ dev_kfree_skb_any(skb);
+ } else {
+ struct xdp_frame *xdpf = tx_buf->data;
+
+ if (napi && tx_buf->type == MTK_TYPE_XDP_TX)
+ xdp_return_frame_rx_napi(xdpf);
+ else if (bq)
+ xdp_return_frame_bulk(xdpf, bq);
+ else
+ xdp_return_frame(xdpf);
+ }
+ }
+ tx_buf->flags = 0;
+ tx_buf->data = NULL;
+}
+
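+/* Record the DMA mapping in the tx_buf for later unmap. With QDMA each
+ * descriptor carries a single buffer; with PDMA one descriptor holds up
+ * to two buffers, so even buffer indices fill txd1/PLEN0 and odd indices
+ * fill txd3/PLEN1.
+ */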
+static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf,
+ struct mtk_tx_dma *txd, dma_addr_t mapped_addr,
+ size_t size, int idx)
+{
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ } else {
+ if (idx & 1) {
+ txd->txd3 = mapped_addr;
+ txd->txd2 |= TX_DMA_PLEN1(size);
+ dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len1, size);
+ } else {
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+ txd->txd1 = mapped_addr;
+ txd->txd2 = TX_DMA_PLEN0(size);
+ dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr);
+ dma_unmap_len_set(tx_buf, dma_len0, size);
+ }
+ }
+}
+
+static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_dma *desc = txd;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_SWC | TX_DMA_PLEN0(info->size) |
+ FIELD_PREP(TX_DMA_PQID, info->qid);
+ if (info->last)
+ data |= TX_DMA_LS0;
+ WRITE_ONCE(desc->txd3, data);
+
+ data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM;
+ /* vlan header offload */
+ if (info->vlan)
+ data |= TX_DMA_INS_VLAN | info->vlan_tci;
+ }
+ WRITE_ONCE(desc->txd4, data);
+}
+
+static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_v2 *desc = txd;
+ struct mtk_eth *eth = mac->hw;
+ u32 data;
+
+ WRITE_ONCE(desc->txd1, info->addr);
+
+ data = TX_DMA_PLEN0(info->size);
+ if (info->last)
+ data |= TX_DMA_LS0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ data |= TX_DMA_PREP_ADDR64(info->addr);
+
+ WRITE_ONCE(desc->txd3, data);
+
+ /* set forward port */
+ switch (mac->id) {
+ case MTK_GMAC1_ID:
+ data = PSE_GDM1_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ case MTK_GMAC2_ID:
+ data = PSE_GDM2_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ case MTK_GMAC3_ID:
+ data = PSE_GDM3_PORT << TX_DMA_FPORT_SHIFT_V2;
+ break;
+ }
+
+ data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid);
+ WRITE_ONCE(desc->txd4, data);
+
+ data = 0;
+ if (info->first) {
+ if (info->gso)
+ data |= TX_DMA_TSO_V2;
+ /* tx checksum offload */
+ if (info->csum)
+ data |= TX_DMA_CHKSUM_V2;
+ if (mtk_is_netsys_v3_or_greater(eth) && netdev_uses_dsa(dev))
+ data |= TX_DMA_SPTAG_V3;
+ }
+ WRITE_ONCE(desc->txd5, data);
+
+ data = 0;
+ if (info->first && info->vlan)
+ data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci;
+ WRITE_ONCE(desc->txd6, data);
+
+ WRITE_ONCE(desc->txd7, 0);
+ WRITE_ONCE(desc->txd8, 0);
+}
+
+static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd,
+ struct mtk_tx_dma_desc_info *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (mtk_is_netsys_v2_or_greater(eth))
+ mtk_tx_set_dma_desc_v2(dev, txd, info);
+ else
+ mtk_tx_set_dma_desc_v1(dev, txd, info);
+}
+
+static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
+ int tx_num, struct mtk_tx_ring *ring, bool gso)
+{
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = skb_headlen(skb),
+ .gso = gso,
+ .csum = skb->ip_summed == CHECKSUM_PARTIAL,
+ .vlan = skb_vlan_tag_present(skb),
+ .qid = skb_get_queue_mapping(skb),
+ .vlan_tci = skb_vlan_tag_get(skb),
+ .first = true,
+ .last = !skb_is_nonlinear(skb),
+ };
+ struct netdev_queue *txq;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_dma *itxd, *txd;
+ struct mtk_tx_dma *itxd_pdma, *txd_pdma;
+ struct mtk_tx_buf *itx_buf, *tx_buf;
+ int i, n_desc = 1;
+ int queue = skb_get_queue_mapping(skb);
+ int k = 0;
+
+ txq = netdev_get_tx_queue(dev, queue);
+ itxd = ring->next_free;
+ itxd_pdma = qdma_to_pdma(ring, itxd);
+ if (itxd == ring->last_free)
+ return -ENOMEM;
+
+ itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+ memset(itx_buf, 0, sizeof(*itx_buf));
+
+ txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
+ return -ENOMEM;
+
+ mtk_tx_set_dma_desc(dev, itxd, &txd_info);
+
+ itx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ itx_buf->mac_id = mac->id;
+ setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size,
+ k++);
+
+ /* TX SG offload */
+ txd = itxd;
+ txd_pdma = qdma_to_pdma(ring, txd);
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ unsigned int offset = 0;
+ int frag_size = skb_frag_size(frag);
+
+ while (frag_size) {
+ bool new_desc = true;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) ||
+ (i & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ txd_pdma = qdma_to_pdma(ring, txd);
+ if (txd == ring->last_free)
+ goto err_dma;
+
+ n_desc++;
+ } else {
+ new_desc = false;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = min_t(unsigned int, frag_size,
+ soc->txrx.dma_max_len);
+ txd_info.qid = queue;
+ txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 &&
+ !(frag_size - txd_info.size);
+ txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag,
+ offset, txd_info.size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr)))
+ goto err_dma;
+
+ mtk_tx_set_dma_desc(dev, txd, &txd_info);
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ if (new_desc)
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+ tx_buf->flags |= MTK_TX_FLAGS_PAGE0;
+ tx_buf->mac_id = mac->id;
+
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr,
+ txd_info.size, k++);
+
+ frag_size -= txd_info.size;
+ offset += txd_info.size;
+ }
+ }
+
+ /* store skb to cleanup */
+ itx_buf->type = MTK_TYPE_SKB;
+ itx_buf->data = skb;
+
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (k & 0x1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ netdev_tx_sent_queue(txq, skb->len);
+ skb_tx_timestamp(skb);
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ if (netif_xmit_stopped(txq) || !netdev_xmit_more())
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int next_idx;
+
+ next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size),
+ ring->dma_size);
+ mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0);
+ }
+
+ return 0;
+
+err_dma:
+ do {
+ tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size);
+
+ /* unmap dma */
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ itxd_pdma->txd2 = TX_DMA_DESP2_DEF;
+
+ itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
+ itxd_pdma = qdma_to_pdma(ring, itxd);
+ } while (itxd != txd);
+
+ return -ENOMEM;
+}
+
+static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb)
+{
+ int i, nfrags = 1;
+ skb_frag_t *frag;
+
+ if (skb_is_gso(skb)) {
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ nfrags += DIV_ROUND_UP(skb_frag_size(frag),
+ eth->soc->txrx.dma_max_len);
+ }
+ } else {
+ nfrags += skb_shinfo(skb)->nr_frags;
+ }
+
+ return nfrags;
+}
+
+static int mtk_queue_stopped(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+ if (netif_queue_stopped(eth->netdev[i]))
+ return 1;
+ }
+
+ return 0;
+}
+
+static void mtk_wake_queue(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+ netif_tx_wake_all_queues(eth->netdev[i]);
+ }
+}
+
+static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct net_device_stats *stats = &dev->stats;
+ bool gso = false;
+ int tx_num;
+
+ /* normally we can rely on the stack not calling this more than once,
+ * however we have 2 queues running on the same ring so we need to lock
+ * the ring access
+ */
+ spin_lock(&eth->page_lock);
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto drop;
+
+ tx_num = mtk_cal_txd_req(eth, skb);
+ if (unlikely(atomic_read(&ring->free_count) <= tx_num)) {
+ netif_tx_stop_all_queues(dev);
+ netif_err(eth, tx_queued, dev,
+ "Tx Ring full when queue awake!\n");
+ spin_unlock(&eth->page_lock);
+ return NETDEV_TX_BUSY;
+ }
+
+ /* TSO: fill MSS info in tcp checksum field */
+ if (skb_is_gso(skb)) {
+ if (skb_cow_head(skb, 0)) {
+ netif_warn(eth, tx_err, dev,
+ "GSO expand head fail.\n");
+ goto drop;
+ }
+
+ if (skb_shinfo(skb)->gso_type &
+ (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ gso = true;
+ tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size);
+ }
+ }
+
+ if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0)
+ goto drop;
+
+ if (unlikely(atomic_read(&ring->free_count) <= ring->thresh))
+ netif_tx_stop_all_queues(dev);
+
+ spin_unlock(&eth->page_lock);
+
+ return NETDEV_TX_OK;
+
+drop:
+ spin_unlock(&eth->page_lock);
+ stats->tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+}
+
+static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth)
+{
+ int i;
+ struct mtk_rx_ring *ring;
+ int idx;
+
+ if (!eth->hwlro)
+ return &eth->rx_ring[0];
+
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ struct mtk_rx_dma *rxd;
+
+ ring = &eth->rx_ring[i];
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ if (rxd->rxd2 & RX_DMA_DONE) {
+ ring->calc_idx_update = true;
+ return ring;
+ }
+ }
+
+ return NULL;
+}
+
+static void mtk_update_rx_cpu_idx(struct mtk_eth *eth)
+{
+ struct mtk_rx_ring *ring;
+ int i;
+
+ if (!eth->hwlro) {
+ ring = &eth->rx_ring[0];
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ } else {
+ for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) {
+ ring = &eth->rx_ring[i];
+ if (ring->calc_idx_update) {
+ ring->calc_idx_update = false;
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+ }
+ }
+ }
+}
+
+static bool mtk_page_pool_enabled(struct mtk_eth *eth)
+{
+ return mtk_is_netsys_v2_or_greater(eth);
+}
+
+static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth,
+ struct xdp_rxq_info *xdp_q,
+ int id, int size)
+{
+ struct page_pool_params pp_params = {
+ .order = 0,
+ .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
+ .pool_size = size,
+ .nid = NUMA_NO_NODE,
+ .dev = eth->dma_dev,
+ .offset = MTK_PP_HEADROOM,
+ .max_len = MTK_PP_MAX_BUF_SIZE,
+ };
+ struct page_pool *pp;
+ int err;
+
+ pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL
+ : DMA_FROM_DEVICE;
+ pp = page_pool_create(&pp_params);
+ if (IS_ERR(pp))
+ return pp;
+
+ err = __xdp_rxq_info_reg(xdp_q, &eth->dummy_dev, id,
+ eth->rx_napi.napi_id, PAGE_SIZE);
+ if (err < 0)
+ goto err_free_pp;
+
+ err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp);
+ if (err)
+ goto err_unregister_rxq;
+
+ return pp;
+
+err_unregister_rxq:
+ xdp_rxq_info_unreg(xdp_q);
+err_free_pp:
+ page_pool_destroy(pp);
+
+ return ERR_PTR(err);
+}
+
+static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr,
+ gfp_t gfp_mask)
+{
+ struct page *page;
+
+ page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN);
+ if (!page)
+ return NULL;
+
+ *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM;
+ return page_address(page);
+}
+
+static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi)
+{
+ if (ring->page_pool)
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(data), napi);
+ else
+ skb_free_frag(data);
+}
+
+static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev,
+ struct mtk_tx_dma_desc_info *txd_info,
+ struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf,
+ void *data, u16 headroom, int index, bool dma_map)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma *txd_pdma;
+
+ if (dma_map) { /* ndo_xdp_xmit */
+ txd_info->addr = dma_map_single(eth->dma_dev, data,
+ txd_info->size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr)))
+ return -ENOMEM;
+
+ tx_buf->flags |= MTK_TX_FLAGS_SINGLE0;
+ } else {
+ struct page *page = virt_to_head_page(data);
+
+ txd_info->addr = page_pool_get_dma_addr(page) +
+ sizeof(struct xdp_frame) + headroom;
+ dma_sync_single_for_device(eth->dma_dev, txd_info->addr,
+ txd_info->size, DMA_BIDIRECTIONAL);
+ }
+ mtk_tx_set_dma_desc(dev, txd, txd_info);
+
+ tx_buf->mac_id = mac->id;
+ tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX;
+ tx_buf->data = (void *)MTK_DMA_DUMMY_DESC;
+
+ txd_pdma = qdma_to_pdma(ring, txd);
+ setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size,
+ index);
+
+ return 0;
+}
+
+static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf,
+ struct net_device *dev, bool dma_map)
+{
+ struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_tx_dma_desc_info txd_info = {
+ .size = xdpf->len,
+ .first = true,
+ .last = !xdp_frame_has_frags(xdpf),
+ .qid = mac->id,
+ };
+ int err, index = 0, n_desc = 1, nr_frags;
+ struct mtk_tx_buf *htx_buf, *tx_buf;
+ struct mtk_tx_dma *htxd, *txd;
+ void *data = xdpf->data;
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ return -EBUSY;
+
+ nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
+ if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags))
+ return -EBUSY;
+
+ spin_lock(&eth->page_lock);
+
+ txd = ring->next_free;
+ if (txd == ring->last_free) {
+ spin_unlock(&eth->page_lock);
+ return -ENOMEM;
+ }
+ htxd = txd;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ htx_buf = tx_buf;
+
+ for (;;) {
+ err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf,
+ data, xdpf->headroom, index, dma_map);
+ if (err < 0)
+ goto unmap;
+
+ if (txd_info.last)
+ break;
+
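+		/* QDMA uses one descriptor per segment, while PDMA packs two
+		 * buffers into each descriptor, so PDMA only advances to a
+		 * new descriptor on every other fragment.
+		 */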
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) {
+ txd = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ if (txd == ring->last_free)
+ goto unmap;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, txd,
+ soc->txrx.txd_size);
+ memset(tx_buf, 0, sizeof(*tx_buf));
+ n_desc++;
+ }
+
+ memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info));
+ txd_info.size = skb_frag_size(&sinfo->frags[index]);
+ txd_info.last = index + 1 == nr_frags;
+ txd_info.qid = mac->id;
+ data = skb_frag_address(&sinfo->frags[index]);
+
+ index++;
+ }
+ /* store xdpf for cleanup */
+ htx_buf->data = xdpf;
+
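+	/* PDMA holds two buffers per descriptor, so mark the final segment
+	 * with LS0 or LS1 depending on which half it occupies.
+	 */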
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd);
+
+ if (index & 1)
+ txd_pdma->txd2 |= TX_DMA_LS0;
+ else
+ txd_pdma->txd2 |= TX_DMA_LS1;
+ }
+
+ ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2);
+ atomic_sub(n_desc, &ring->free_count);
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr);
+ } else {
+ int idx;
+
+ idx = txd_to_idx(ring, txd, soc->txrx.txd_size);
+ mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size),
+ MT7628_TX_CTX_IDX0);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return 0;
+
+unmap:
+ while (htxd != txd) {
+ tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size);
+ mtk_tx_unmap(eth, tx_buf, NULL, false);
+
+ htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd);
+
+ txd_pdma->txd2 = TX_DMA_DESP2_DEF;
+ }
+
+ htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2);
+ }
+
+ spin_unlock(&eth->page_lock);
+
+ return err;
+}
+
+static int mtk_xdp_xmit(struct net_device *dev, int num_frame,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ struct mtk_eth *eth = mac->hw;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < num_frame; i++) {
+ if (mtk_xdp_submit_frame(eth, frames[i], dev, true))
+ break;
+ nxmit++;
+ }
+
+ u64_stats_update_begin(&hw_stats->syncp);
+ hw_stats->xdp_stats.tx_xdp_xmit += nxmit;
+ hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit;
+ u64_stats_update_end(&hw_stats->syncp);
+
+ return nxmit;
+}
+
+static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring,
+ struct xdp_buff *xdp, struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hw_stats = mac->hw_stats;
+ u64 *count = &hw_stats->xdp_stats.rx_xdp_drop;
+ struct bpf_prog *prog;
+ u32 act = XDP_PASS;
+
+ rcu_read_lock();
+
+ prog = rcu_dereference(eth->prog);
+ if (!prog)
+ goto out;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ count = &hw_stats->xdp_stats.rx_xdp_pass;
+ goto update_stats;
+ case XDP_REDIRECT:
+ if (unlikely(xdp_do_redirect(dev, xdp, prog))) {
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_redirect;
+ goto update_stats;
+ case XDP_TX: {
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+
+ if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) {
+ count = &hw_stats->xdp_stats.rx_xdp_tx_errors;
+ act = XDP_DROP;
+ break;
+ }
+
+ count = &hw_stats->xdp_stats.rx_xdp_tx;
+ goto update_stats;
+ }
+ default:
+ bpf_warn_invalid_xdp_action(dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ break;
+ }
+
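+	/* XDP_DROP, XDP_ABORTED and failed TX/REDIRECT all end up here:
+	 * return the buffer to the page pool.
+	 */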
+ page_pool_put_full_page(ring->page_pool,
+ virt_to_head_page(xdp->data), true);
+
+update_stats:
+ u64_stats_update_begin(&hw_stats->syncp);
+ *count = *count + 1;
+ u64_stats_update_end(&hw_stats->syncp);
+out:
+ rcu_read_unlock();
+
+ return act;
+}
+
+static int mtk_poll_rx(struct napi_struct *napi, int budget,
+ struct mtk_eth *eth)
+{
+ struct dim_sample dim_sample = {};
+ struct mtk_rx_ring *ring;
+ bool xdp_flush = false;
+ int idx;
+ struct sk_buff *skb;
+ u64 addr64 = 0;
+ u8 *data, *new_data;
+ struct mtk_rx_dma_v2 *rxd, trxd;
+ int done = 0, bytes = 0;
+ dma_addr_t dma_addr = DMA_MAPPING_ERROR;
+
+ while (done < budget) {
+ unsigned int pktlen, *rxdcsum;
+ struct net_device *netdev;
+ u32 hash, reason;
+ int mac = 0;
+
+ ring = mtk_get_rx_ring(eth);
+ if (unlikely(!ring))
+ goto rx_done;
+
+ idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size);
+ rxd = ring->dma + idx * eth->soc->txrx.rxd_size;
+ data = ring->data[idx];
+
+ if (!mtk_rx_get_desc(eth, &trxd, rxd))
+ break;
+
+		/* find out which mac the packet comes from. values start at 1 */
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ u32 val = RX_DMA_GET_SPORT_V2(trxd.rxd5);
+
+ switch (val) {
+ case PSE_GDM1_PORT:
+ case PSE_GDM2_PORT:
+ mac = val - 1;
+ break;
+ case PSE_GDM3_PORT:
+ mac = MTK_GMAC3_ID;
+ break;
+ default:
+ break;
+ }
+ } else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) &&
+ !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) {
+ mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1;
+ }
+
+ if (unlikely(mac < 0 || mac >= MTK_MAX_DEVS ||
+ !eth->netdev[mac]))
+ goto release_desc;
+
+ netdev = eth->netdev[mac];
+
+ if (unlikely(test_bit(MTK_RESETTING, &eth->state)))
+ goto release_desc;
+
+ pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
+
+ /* alloc new buffer */
+ if (ring->page_pool) {
+ struct page *page = virt_to_head_page(data);
+ struct xdp_buff xdp;
+ u32 ret;
+
+ new_data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr,
+ GFP_ATOMIC);
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_sync_single_for_cpu(eth->dma_dev,
+ page_pool_get_dma_addr(page) + MTK_PP_HEADROOM,
+ pktlen, page_pool_get_dma_dir(ring->page_pool));
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q);
+ xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen,
+ false);
+ xdp_buff_clear_frags_flag(&xdp);
+
+ ret = mtk_xdp_run(eth, ring, &xdp, netdev);
+ if (ret == XDP_REDIRECT)
+ xdp_flush = true;
+
+ if (ret != XDP_PASS)
+ goto skip_rx;
+
+ skb = build_skb(data, PAGE_SIZE);
+ if (unlikely(!skb)) {
+ page_pool_put_full_page(ring->page_pool,
+ page, true);
+ netdev->stats.rx_dropped++;
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, xdp.data - xdp.data_hard_start);
+ skb_put(skb, xdp.data_end - xdp.data);
+ skb_mark_for_recycle(skb);
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ new_data = napi_alloc_frag(ring->frag_size);
+ else
+ new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);
+
+ if (unlikely(!new_data)) {
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ new_data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(new_data);
+ netdev->stats.rx_dropped++;
+ goto release_desc;
+ }
+
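+			/* rxd1 only holds the low 32 address bits; on SoCs
+			 * with 36-bit DMA the upper bits come from rxd2.
+			 */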
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ addr64 = RX_DMA_GET_ADDR64(trxd.rxd2);
+
+ dma_unmap_single(eth->dma_dev, ((u64)trxd.rxd1 | addr64),
+ ring->buf_size, DMA_FROM_DEVICE);
+
+ skb = build_skb(data, ring->frag_size);
+ if (unlikely(!skb)) {
+ netdev->stats.rx_dropped++;
+ skb_free_frag(data);
+ goto skip_rx;
+ }
+
+ skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
+ skb_put(skb, pktlen);
+ }
+
+ skb->dev = netdev;
+ bytes += skb->len;
+
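+		/* NETSYS v2+ reports the PPE hash and checksum status in
+		 * rxd5/rxd3, older SoCs carry both in rxd4.
+		 */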
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5);
+ hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY;
+ if (hash != MTK_RXD5_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd3;
+ } else {
+ reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
+ hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY;
+ if (hash != MTK_RXD4_FOE_ENTRY)
+ skb_set_hash(skb, jhash_1word(hash, 0),
+ PKT_HASH_TYPE_L4);
+ rxdcsum = &trxd.rxd4;
+ }
+
+ if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb_checksum_none_assert(skb);
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ /* When using VLAN untagging in combination with DSA, the
+ * hardware treats the MTK special tag as a VLAN and untags it.
+ */
+ if (mtk_is_netsys_v1(eth) && (trxd.rxd2 & RX_DMA_VTAG) &&
+ netdev_uses_dsa(netdev)) {
+ unsigned int port = RX_DMA_VPID(trxd.rxd3) & GENMASK(2, 0);
+
+ if (port < ARRAY_SIZE(eth->dsa_meta) &&
+ eth->dsa_meta[port])
+ skb_dst_set_noref(skb, &eth->dsa_meta[port]->dst);
+ }
+
+ if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
+
+ skb_record_rx_queue(skb, 0);
+ napi_gro_receive(napi, skb);
+
+skip_rx:
+ ring->data[idx] = new_data;
+ rxd->rxd1 = (unsigned int)dma_addr;
+release_desc:
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+ else
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA) &&
+ likely(dma_addr != DMA_MAPPING_ERROR))
+ rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+
+ ring->calc_idx = idx;
+ done++;
+ }
+
+rx_done:
+ if (done) {
+ /* make sure that all changes to the dma ring are flushed before
+ * we continue
+ */
+ wmb();
+ mtk_update_rx_cpu_idx(eth);
+ }
+
+ eth->rx_packets += done;
+ eth->rx_bytes += bytes;
+ dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes,
+ &dim_sample);
+ net_dim(&eth->rx_dim, dim_sample);
+
+ if (xdp_flush)
+ xdp_do_flush_map();
+
+ return done;
+}
+
+struct mtk_poll_state {
+ struct netdev_queue *txq;
+ unsigned int total;
+ unsigned int done;
+ unsigned int bytes;
+};
+
+static void
+mtk_poll_tx_done(struct mtk_eth *eth, struct mtk_poll_state *state, u8 mac,
+ struct sk_buff *skb)
+{
+ struct netdev_queue *txq;
+ struct net_device *dev;
+ unsigned int bytes = skb->len;
+
+ state->total++;
+ eth->tx_packets++;
+ eth->tx_bytes += bytes;
+
+ dev = eth->netdev[mac];
+ if (!dev)
+ return;
+
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ if (state->txq == txq) {
+ state->done++;
+ state->bytes += bytes;
+ return;
+ }
+
+ if (state->txq)
+ netdev_tx_completed_queue(state->txq, state->done, state->bytes);
+
+ state->txq = txq;
+ state->done = 1;
+ state->bytes = bytes;
+}
+
+static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget,
+ struct mtk_poll_state *state)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
+ u32 cpu, dma;
+
+ cpu = ring->last_free_ptr;
+ dma = mtk_r32(eth, reg_map->qdma.drx_ptr);
+
+ desc = mtk_qdma_phys_to_virt(ring, cpu);
+ xdp_frame_bulk_init(&bq);
+
+ while ((cpu != dma) && budget) {
+ u32 next_cpu = desc->txd2;
+
+ desc = mtk_qdma_phys_to_virt(ring, desc->txd2);
+ if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0)
+ break;
+
+ tx_buf = mtk_desc_to_tx_buf(ring, desc,
+ eth->soc->txrx.txd_size);
+ if (!tx_buf->data)
+ break;
+
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, tx_buf->mac_id,
+ tx_buf->data);
+
+ budget--;
+ }
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
+
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+ cpu = next_cpu;
+ }
+ xdp_flush_frame_bulk(&bq);
+
+ ring->last_free_ptr = cpu;
+ mtk_w32(eth, cpu, reg_map->qdma.crx_ptr);
+
+ return budget;
+}
+
+static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget,
+ struct mtk_poll_state *state)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct mtk_tx_buf *tx_buf;
+ struct xdp_frame_bulk bq;
+ struct mtk_tx_dma *desc;
+ u32 cpu, dma;
+
+ cpu = ring->cpu_idx;
+ dma = mtk_r32(eth, MT7628_TX_DTX_IDX0);
+ xdp_frame_bulk_init(&bq);
+
+ while ((cpu != dma) && budget) {
+ tx_buf = &ring->buf[cpu];
+ if (!tx_buf->data)
+ break;
+
+ if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) {
+ if (tx_buf->type == MTK_TYPE_SKB)
+ mtk_poll_tx_done(eth, state, 0, tx_buf->data);
+ budget--;
+ }
+ mtk_tx_unmap(eth, tx_buf, &bq, true);
+
+ desc = ring->dma + cpu * eth->soc->txrx.txd_size;
+ ring->last_free = desc;
+ atomic_inc(&ring->free_count);
+
+ cpu = NEXT_DESP_IDX(cpu, ring->dma_size);
+ }
+ xdp_flush_frame_bulk(&bq);
+
+ ring->cpu_idx = cpu;
+
+ return budget;
+}
+
+static int mtk_poll_tx(struct mtk_eth *eth, int budget)
+{
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ struct dim_sample dim_sample = {};
+ struct mtk_poll_state state = {};
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ budget = mtk_poll_tx_qdma(eth, budget, &state);
+ else
+ budget = mtk_poll_tx_pdma(eth, budget, &state);
+
+ if (state.txq)
+ netdev_tx_completed_queue(state.txq, state.done, state.bytes);
+
+ dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes,
+ &dim_sample);
+ net_dim(&eth->tx_dim, dim_sample);
+
+ if (mtk_queue_stopped(eth) &&
+ (atomic_read(&ring->free_count) > ring->thresh))
+ mtk_wake_queue(eth);
+
+ return state.total;
+}
+
+static void mtk_handle_status_irq(struct mtk_eth *eth)
+{
+ u32 status2 = mtk_r32(eth, MTK_INT_STATUS2);
+
+ if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) {
+ mtk_stats_update(eth);
+ mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF),
+ MTK_INT_STATUS2);
+ }
+}
+
+static int mtk_napi_tx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ int tx_done = 0;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_handle_status_irq(eth);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status);
+ tx_done = mtk_poll_tx(eth, budget);
+
+ if (unlikely(netif_msg_intr(eth))) {
+ dev_info(eth->dev,
+ "done tx %d, intr 0x%08x/0x%x\n", tx_done,
+ mtk_r32(eth, reg_map->tx_irq_status),
+ mtk_r32(eth, reg_map->tx_irq_mask));
+ }
+
+ if (tx_done == budget)
+ return budget;
+
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
+ return budget;
+
+ if (napi_complete_done(napi, tx_done))
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+
+ return tx_done;
+}
+
+static int mtk_napi_rx(struct napi_struct *napi, int budget)
+{
+ struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ int rx_done_total = 0;
+
+ mtk_handle_status_irq(eth);
+
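+	/* Keep polling while the RX-done status stays asserted so packets
+	 * arriving during the poll are not missed.
+	 */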
+ do {
+ int rx_done;
+
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask,
+ reg_map->pdma.irq_status);
+ rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth);
+ rx_done_total += rx_done;
+
+ if (unlikely(netif_msg_intr(eth))) {
+ dev_info(eth->dev,
+ "done rx %d, intr 0x%08x/0x%x\n", rx_done,
+ mtk_r32(eth, reg_map->pdma.irq_status),
+ mtk_r32(eth, reg_map->pdma.irq_mask));
+ }
+
+ if (rx_done_total == budget)
+ return budget;
+
+ } while (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask);
+
+ if (napi_complete_done(napi, rx_done_total))
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+
+ return rx_done_total;
+}
+
+static int mtk_tx_alloc(struct mtk_eth *eth)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ int i, sz = soc->txrx.txd_size;
+ struct mtk_tx_dma_v2 *txd;
+ int ring_size;
+ u32 ofs, val;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA))
+ ring_size = MTK_QDMA_RING_SIZE;
+ else
+ ring_size = MTK_DMA_SIZE;
+
+ ring->buf = kcalloc(ring_size, sizeof(*ring->buf),
+ GFP_KERNEL);
+ if (!ring->buf)
+ goto no_tx_mem;
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_SRAM)) {
+ ring->dma = eth->sram_base + ring_size * sz;
+ ring->phys = eth->phy_scratch_ring + ring_size * (dma_addr_t)sz;
+ } else {
+ ring->dma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys, GFP_KERNEL);
+ }
+
+ if (!ring->dma)
+ goto no_tx_mem;
+
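+	/* Chain the descriptors into a ring: txd2 of each entry points to
+	 * the physical address of the next one, ownership stays with the CPU.
+	 */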
+ for (i = 0; i < ring_size; i++) {
+ int next = (i + 1) % ring_size;
+ u32 next_ptr = ring->phys + next * sz;
+
+ txd = ring->dma + i * sz;
+ txd->txd2 = next_ptr;
+ txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
+ txd->txd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ txd->txd5 = 0;
+ txd->txd6 = 0;
+ txd->txd7 = 0;
+ txd->txd8 = 0;
+ }
+ }
+
+ /* On MT7688 (PDMA only) this driver uses the ring->dma structs
+ * only as the framework. The real HW descriptors are the PDMA
+ * descriptors in ring->dma_pdma.
+ */
+ if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, ring_size * sz,
+ &ring->phys_pdma, GFP_KERNEL);
+ if (!ring->dma_pdma)
+ goto no_tx_mem;
+
+ for (i = 0; i < ring_size; i++) {
+ ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF;
+ ring->dma_pdma[i].txd4 = 0;
+ }
+ }
+
+ ring->dma_size = ring_size;
+ atomic_set(&ring->free_count, ring_size - 2);
+ ring->next_free = ring->dma;
+ ring->last_free = (void *)txd;
+ ring->last_free_ptr = (u32)(ring->phys + ((ring_size - 1) * sz));
+ ring->thresh = MAX_SKB_FRAGS;
+
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) {
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr);
+ mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr);
+ mtk_w32(eth,
+ ring->phys + ((ring_size - 1) * sz),
+ soc->reg_map->qdma.crx_ptr);
+ mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr);
+
+ for (i = 0, ofs = 0; i < MTK_QDMA_NUM_QUEUES; i++) {
+ val = (QDMA_RES_THRES << 8) | QDMA_RES_THRES;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_cfg + ofs);
+
+ val = MTK_QTX_SCH_MIN_RATE_EN |
+ /* minimum: 10 Mbps */
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_MAN, 1) |
+ FIELD_PREP(MTK_QTX_SCH_MIN_RATE_EXP, 4) |
+ MTK_QTX_SCH_LEAKY_BUCKET_SIZE;
+ if (mtk_is_netsys_v1(eth))
+ val |= MTK_QTX_SCH_LEAKY_BUCKET_EN;
+ mtk_w32(eth, val, soc->reg_map->qdma.qtx_sch + ofs);
+ ofs += MTK_QTX_OFFSET;
+ }
+ val = MTK_QDMA_TX_SCH_MAX_WFQ | (MTK_QDMA_TX_SCH_MAX_WFQ << 16);
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate);
+ if (mtk_is_netsys_v2_or_greater(eth))
+ mtk_w32(eth, val, soc->reg_map->qdma.tx_sch_rate + 4);
+ } else {
+ mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0);
+ mtk_w32(eth, ring_size, MT7628_TX_MAX_CNT0);
+ mtk_w32(eth, 0, MT7628_TX_CTX_IDX0);
+ mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx);
+ }
+
+ return 0;
+
+no_tx_mem:
+ return -ENOMEM;
+}
+
+static void mtk_tx_clean(struct mtk_eth *eth)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_tx_ring *ring = &eth->tx_ring;
+ int i;
+
+ if (ring->buf) {
+ for (i = 0; i < ring->dma_size; i++)
+ mtk_tx_unmap(eth, &ring->buf[i], NULL, false);
+ kfree(ring->buf);
+ ring->buf = NULL;
+ }
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * soc->txrx.txd_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->dma_pdma) {
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * soc->txrx.txd_size,
+ ring->dma_pdma, ring->phys_pdma);
+ ring->dma_pdma = NULL;
+ }
+}
+
+static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ struct mtk_rx_ring *ring;
+ int rx_data_len, rx_dma_size, tx_ring_size;
+ int i;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ tx_ring_size = MTK_QDMA_RING_SIZE;
+ else
+ tx_ring_size = MTK_DMA_SIZE;
+
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ if (ring_no)
+ return -EINVAL;
+ ring = &eth->rx_ring_qdma;
+ } else {
+ ring = &eth->rx_ring[ring_no];
+ }
+
+ if (rx_flag == MTK_RX_FLAGS_HWLRO) {
+ rx_data_len = MTK_MAX_LRO_RX_LENGTH;
+ rx_dma_size = MTK_HW_LRO_DMA_SIZE;
+ } else {
+ rx_data_len = ETH_DATA_LEN;
+ rx_dma_size = MTK_DMA_SIZE;
+ }
+
+ ring->frag_size = mtk_max_frag_size(rx_data_len);
+ ring->buf_size = mtk_max_buf_size(ring->frag_size);
+ ring->data = kcalloc(rx_dma_size, sizeof(*ring->data),
+ GFP_KERNEL);
+ if (!ring->data)
+ return -ENOMEM;
+
+ if (mtk_page_pool_enabled(eth)) {
+ struct page_pool *pp;
+
+ pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no,
+ rx_dma_size);
+ if (IS_ERR(pp))
+ return PTR_ERR(pp);
+
+ ring->page_pool = pp;
+ }
+
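+	/* Normal RX rings can live in SRAM right behind the TX ring; QDMA
+	 * and HW LRO rings are always allocated from coherent DMA memory.
+	 */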
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM) ||
+ rx_flag != MTK_RX_FLAGS_NORMAL) {
+ ring->dma = dma_alloc_coherent(eth->dma_dev,
+ rx_dma_size * eth->soc->txrx.rxd_size,
+ &ring->phys, GFP_KERNEL);
+ } else {
+ struct mtk_tx_ring *tx_ring = &eth->tx_ring;
+
+ ring->dma = tx_ring->dma + tx_ring_size *
+ eth->soc->txrx.txd_size * (ring_no + 1);
+ ring->phys = tx_ring->phys + tx_ring_size *
+ eth->soc->txrx.txd_size * (ring_no + 1);
+ }
+
+ if (!ring->dma)
+ return -ENOMEM;
+
+ for (i = 0; i < rx_dma_size; i++) {
+ struct mtk_rx_dma_v2 *rxd;
+ dma_addr_t dma_addr;
+ void *data;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (ring->page_pool) {
+ data = mtk_page_pool_get_buff(ring->page_pool,
+ &dma_addr, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ if (ring->frag_size <= PAGE_SIZE)
+ data = netdev_alloc_frag(ring->frag_size);
+ else
+ data = mtk_max_lro_buf_alloc(GFP_KERNEL);
+
+ if (!data)
+ return -ENOMEM;
+
+ dma_addr = dma_map_single(eth->dma_dev,
+ data + NET_SKB_PAD + eth->ip_align,
+ ring->buf_size, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(eth->dma_dev,
+ dma_addr))) {
+ skb_free_frag(data);
+ return -ENOMEM;
+ }
+ }
+ rxd->rxd1 = (unsigned int)dma_addr;
+ ring->data[i] = data;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ rxd->rxd2 = RX_DMA_LSO;
+ else
+ rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ rxd->rxd2 |= RX_DMA_PREP_ADDR64(dma_addr);
+
+ rxd->rxd3 = 0;
+ rxd->rxd4 = 0;
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ rxd->rxd5 = 0;
+ rxd->rxd6 = 0;
+ rxd->rxd7 = 0;
+ rxd->rxd8 = 0;
+ }
+ }
+
+ ring->dma_size = rx_dma_size;
+ ring->calc_idx_update = false;
+ ring->calc_idx = rx_dma_size - 1;
+ if (rx_flag == MTK_RX_FLAGS_QDMA)
+ ring->crx_idx_reg = reg_map->qdma.qcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ else
+ ring->crx_idx_reg = reg_map->pdma.pcrx_ptr +
+ ring_no * MTK_QRX_OFFSET;
+ /* make sure that all changes to the dma ring are flushed before we
+ * continue
+ */
+ wmb();
+
+ if (rx_flag == MTK_RX_FLAGS_QDMA) {
+ mtk_w32(eth, ring->phys,
+ reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->qdma.rst_idx);
+ } else {
+ mtk_w32(eth, ring->phys,
+ reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, rx_dma_size,
+ reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET);
+ mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no),
+ reg_map->pdma.rst_idx);
+ }
+ mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg);
+
+ return 0;
+}
+
+static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring, bool in_sram)
+{
+ u64 addr64 = 0;
+ int i;
+
+ if (ring->data && ring->dma) {
+ for (i = 0; i < ring->dma_size; i++) {
+ struct mtk_rx_dma *rxd;
+
+ if (!ring->data[i])
+ continue;
+
+ rxd = ring->dma + i * eth->soc->txrx.rxd_size;
+ if (!rxd->rxd1)
+ continue;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA))
+ addr64 = RX_DMA_GET_ADDR64(rxd->rxd2);
+
+ dma_unmap_single(eth->dma_dev, ((u64)rxd->rxd1 | addr64),
+ ring->buf_size, DMA_FROM_DEVICE);
+ mtk_rx_put_buff(ring, ring->data[i], false);
+ }
+ kfree(ring->data);
+ ring->data = NULL;
+ }
+
+ if (!in_sram && ring->dma) {
+ dma_free_coherent(eth->dma_dev,
+ ring->dma_size * eth->soc->txrx.rxd_size,
+ ring->dma, ring->phys);
+ ring->dma = NULL;
+ }
+
+ if (ring->page_pool) {
+ if (xdp_rxq_info_is_reg(&ring->xdp_q))
+ xdp_rxq_info_unreg(&ring->xdp_q);
+ page_pool_destroy(ring->page_pool);
+ ring->page_pool = NULL;
+ }
+}
+
+static int mtk_hwlro_rx_init(struct mtk_eth *eth)
+{
+ int i;
+ u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0;
+ u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0;
+
+ /* set LRO rings to auto-learn modes */
+ ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE;
+
+ /* validate LRO ring */
+ ring_ctrl_dw2 |= MTK_RING_VLD;
+
+ /* set AGE timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H;
+ ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L;
+
+ /* set max AGG timer (unit: 20us) */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME;
+
+ /* set max LRO AGG count */
+ ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L;
+ ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H;
+
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i));
+ mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i));
+ }
+
+ /* IPv4 checksum update enable */
+ lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN;
+
+ /* switch priority comparison to packet count mode */
+ lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE;
+
+ /* bandwidth threshold setting */
+ mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2);
+
+ /* auto-learn score delta setting */
+ mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA);
+
+ /* set refresh timer for altering flows to 1 sec. (unit: 20us) */
+ mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME,
+ MTK_PDMA_LRO_ALT_REFRESH_TIMER);
+
+ /* set HW LRO mode & the max aggregation count for rx packets */
+ lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff);
+
+ /* the minimal remaining room of SDL0 in RXD for lro aggregation */
+ lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL;
+
+ /* enable HW LRO */
+ lro_ctrl_dw0 |= MTK_LRO_EN;
+
+ mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3);
+ mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0);
+
+ return 0;
+}
+
+static void mtk_hwlro_rx_uninit(struct mtk_eth *eth)
+{
+ int i;
+ u32 val;
+
+ /* relinquish lro rings, flush aggregated packets */
+ mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0);
+
+	/* wait for relinquishment to complete */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0);
+ if (val & MTK_LRO_RING_RELINQUISH_DONE) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+
+ /* invalidate lro rings */
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i));
+
+ /* disable HW LRO */
+ mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0);
+}
+
+static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx));
+
+ /* validate the IP setting */
+ mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+}
+
+static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx)
+{
+ u32 reg_val;
+
+ reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx));
+
+ /* invalidate the IP setting */
+ mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx));
+
+ mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx));
+}
+
+static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac)
+{
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (mac->hwlro_ip[i])
+ cnt++;
+ }
+
+ return cnt;
+}
+
+static int mtk_hwlro_add_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if ((fsp->flow_type != TCP_V4_FLOW) ||
+ (!fsp->h_u.tcp_ip4_spec.ip4dst) ||
+ (fsp->location > 1))
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst);
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]);
+
+ return 0;
+}
+
+static int mtk_hwlro_del_ipaddr(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int hwlro_idx;
+
+ if (fsp->location > 1)
+ return -EINVAL;
+
+ mac->hwlro_ip[fsp->location] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location;
+
+ mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+
+ return 0;
+}
+
+static void mtk_hwlro_netdev_disable(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, hwlro_idx;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ mac->hwlro_ip[i] = 0;
+ hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i;
+
+ mtk_hwlro_inval_ipaddr(eth, hwlro_idx);
+ }
+
+ mac->hwlro_ip_cnt = 0;
+}
+
+static int mtk_hwlro_get_fdir_entry(struct net_device *dev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct ethtool_rx_flow_spec *fsp =
+ (struct ethtool_rx_flow_spec *)&cmd->fs;
+
+ if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip))
+ return -EINVAL;
+
+	/* only the TCP destination IPv4 address is meaningful here,
+	 * the other fields are ignored
+	 */
+ fsp->flow_type = TCP_V4_FLOW;
+ fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]);
+ fsp->m_u.tcp_ip4_spec.ip4dst = 0;
+
+ fsp->h_u.tcp_ip4_spec.ip4src = 0;
+ fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff;
+ fsp->h_u.tcp_ip4_spec.psrc = 0;
+ fsp->m_u.tcp_ip4_spec.psrc = 0xffff;
+ fsp->h_u.tcp_ip4_spec.pdst = 0;
+ fsp->m_u.tcp_ip4_spec.pdst = 0xffff;
+ fsp->h_u.tcp_ip4_spec.tos = 0;
+ fsp->m_u.tcp_ip4_spec.tos = 0xff;
+
+ return 0;
+}
+
+static int mtk_hwlro_get_fdir_all(struct net_device *dev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) {
+ if (cnt == cmd->rule_cnt)
+ return -EMSGSIZE;
+
+ if (mac->hwlro_ip[i]) {
+ rule_locs[cnt] = i;
+ cnt++;
+ }
+ }
+
+ cmd->rule_cnt = cnt;
+
+ return 0;
+}
+
+static netdev_features_t mtk_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ if (!(features & NETIF_F_LRO)) {
+ struct mtk_mac *mac = netdev_priv(dev);
+ int ip_cnt = mtk_hwlro_get_ip_cnt(mac);
+
+ if (ip_cnt) {
+ netdev_info(dev, "RX flow is programmed, LRO should keep on\n");
+
+ features |= NETIF_F_LRO;
+ }
+ }
+
+ return features;
+}
+
+static int mtk_set_features(struct net_device *dev, netdev_features_t features)
+{
+ netdev_features_t diff = dev->features ^ features;
+
+ if ((diff & NETIF_F_LRO) && !(features & NETIF_F_LRO))
+ mtk_hwlro_netdev_disable(dev);
+
+ return 0;
+}
+
+/* wait for DMA to finish whatever it is doing before we start using it again */
+static int mtk_dma_busy_wait(struct mtk_eth *eth)
+{
+ unsigned int reg;
+ int ret;
+ u32 val;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ reg = eth->soc->reg_map->qdma.glo_cfg;
+ else
+ reg = eth->soc->reg_map->pdma.glo_cfg;
+
+ ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val,
+ !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)),
+ 5, MTK_DMA_BUSY_TIMEOUT_US);
+ if (ret)
+ dev_err(eth->dev, "DMA init timeout\n");
+
+ return ret;
+}
+
+static int mtk_dma_init(struct mtk_eth *eth)
+{
+ int err;
+ u32 i;
+
+ if (mtk_dma_busy_wait(eth))
+ return -EBUSY;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* QDMA needs scratch memory for internal reordering of the
+ * descriptors
+ */
+ err = mtk_init_fq_dma(eth);
+ if (err)
+ return err;
+ }
+
+ err = mtk_tx_alloc(eth);
+ if (err)
+ return err;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA);
+ if (err)
+ return err;
+ }
+
+ err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL);
+ if (err)
+ return err;
+
+ if (eth->hwlro) {
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) {
+ err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO);
+ if (err)
+ return err;
+ }
+ err = mtk_hwlro_rx_init(eth);
+ if (err)
+ return err;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ /* Enable random early drop and set drop threshold
+ * automatically
+ */
+ mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN |
+ FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th);
+ mtk_w32(eth, 0x0, eth->soc->reg_map->qdma.hred);
+ }
+
+ return 0;
+}
+
+static void mtk_dma_free(struct mtk_eth *eth)
+{
+ const struct mtk_soc_data *soc = eth->soc;
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ if (eth->netdev[i])
+ netdev_reset_queue(eth->netdev[i]);
+ if (!MTK_HAS_CAPS(soc->caps, MTK_SRAM) && eth->scratch_ring) {
+ dma_free_coherent(eth->dma_dev,
+ MTK_QDMA_RING_SIZE * soc->txrx.txd_size,
+ eth->scratch_ring, eth->phy_scratch_ring);
+ eth->scratch_ring = NULL;
+ eth->phy_scratch_ring = 0;
+ }
+ mtk_tx_clean(eth);
+ mtk_rx_clean(eth, &eth->rx_ring[0], MTK_HAS_CAPS(soc->caps, MTK_SRAM));
+ mtk_rx_clean(eth, &eth->rx_ring_qdma, false);
+
+ if (eth->hwlro) {
+ mtk_hwlro_rx_uninit(eth);
+ for (i = 1; i < MTK_MAX_RX_RING_NUM; i++)
+ mtk_rx_clean(eth, &eth->rx_ring[i], false);
+ }
+
+ kfree(eth->scratch_head);
+}
+
+static bool mtk_hw_reset_check(struct mtk_eth *eth)
+{
+ u32 val = mtk_r32(eth, MTK_INT_STATUS2);
+
+ return (val & MTK_FE_INT_FQ_EMPTY) || (val & MTK_FE_INT_RFIFO_UF) ||
+ (val & MTK_FE_INT_RFIFO_OV) || (val & MTK_FE_INT_TSO_FAIL) ||
+ (val & MTK_FE_INT_TSO_ALIGN) || (val & MTK_FE_INT_TSO_ILLEGAL);
+}
+
+static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (test_bit(MTK_RESETTING, &eth->state))
+ return;
+
+ if (!mtk_hw_reset_check(eth))
+ return;
+
+ eth->netdev[mac->id]->stats.tx_errors++;
+ netif_err(eth, tx_err, dev, "transmit timed out\n");
+
+ schedule_work(&eth->pending_work);
+}
+
+static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ eth->rx_events++;
+ if (likely(napi_schedule_prep(&eth->rx_napi))) {
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ __napi_schedule(&eth->rx_napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+
+ eth->tx_events++;
+ if (likely(napi_schedule_prep(&eth->tx_napi))) {
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ __napi_schedule(&eth->tx_napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t mtk_handle_irq(int irq, void *_eth)
+{
+ struct mtk_eth *eth = _eth;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+
+ if (mtk_r32(eth, reg_map->pdma.irq_mask) &
+ eth->soc->txrx.rx_irq_done_mask) {
+ if (mtk_r32(eth, reg_map->pdma.irq_status) &
+ eth->soc->txrx.rx_irq_done_mask)
+ mtk_handle_irq_rx(irq, _eth);
+ }
+ if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) {
+ if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT)
+ mtk_handle_irq_tx(irq, _eth);
+ }
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void mtk_poll_controller(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ mtk_handle_irq_rx(eth->irq[2], dev);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask);
+}
+#endif
+
+static int mtk_start_dma(struct mtk_eth *eth)
+{
+ u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? MTK_RX_2B_OFFSET : 0;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ int err;
+
+ err = mtk_dma_init(eth);
+ if (err) {
+ mtk_dma_free(eth);
+ return err;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ val = mtk_r32(eth, reg_map->qdma.glo_cfg);
+ val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO |
+ MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE;
+
+ if (mtk_is_netsys_v2_or_greater(eth))
+ val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
+ MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
+ MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+ else
+ val |= MTK_RX_BT_32DWORDS;
+ mtk_w32(eth, val, reg_map->qdma.glo_cfg);
+
+ mtk_w32(eth,
+ MTK_RX_DMA_EN | rx_2b_offset |
+ MTK_RX_BT_32DWORDS | MTK_MULTI_EN,
+ reg_map->pdma.glo_cfg);
+ } else {
+ mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN |
+ MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS,
+ reg_map->pdma.glo_cfg);
+ }
+
+ return 0;
+}
+
+static void mtk_gdm_config(struct mtk_eth *eth, u32 config)
+{
+ int i;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ u32 val;
+
+ if (!eth->netdev[i])
+ continue;
+
+ val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i));
+
+		/* by default, set up the forward port to send frames to PDMA */
+ val &= ~0xffff;
+
+ /* Enable RX checksum */
+ val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN;
+
+ val |= config;
+
+ if (netdev_uses_dsa(eth->netdev[i]))
+ val |= MTK_GDMA_SPECIAL_TAG;
+
+ mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i));
+ }
+ /* Reset and enable PSE */
+ mtk_w32(eth, RST_GL_PSE, MTK_RST_GL);
+ mtk_w32(eth, 0, MTK_RST_GL);
+}
+
+static bool mtk_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ return netdev_uses_dsa(dev) &&
+ dev->dsa_ptr->tag_ops->proto == DSA_TAG_PROTO_MTK;
+#else
+ return false;
+#endif
+}
+
+static int mtk_device_event(struct notifier_block *n, unsigned long event, void *ptr)
+{
+ struct mtk_mac *mac = container_of(n, struct mtk_mac, device_notifier);
+ struct mtk_eth *eth = mac->hw;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct ethtool_link_ksettings s;
+ struct net_device *ldev;
+ struct list_head *iter;
+ struct dsa_port *dp;
+
+ if (event != NETDEV_CHANGE)
+ return NOTIFY_DONE;
+
+ netdev_for_each_lower_dev(dev, ldev, iter) {
+ if (netdev_priv(ldev) == mac)
+ goto found;
+ }
+
+ return NOTIFY_DONE;
+
+found:
+ if (!dsa_slave_dev_check(dev))
+ return NOTIFY_DONE;
+
+ if (__ethtool_get_link_ksettings(dev, &s))
+ return NOTIFY_DONE;
+
+ if (s.base.speed == 0 || s.base.speed == ((__u32)-1))
+ return NOTIFY_DONE;
+
+ dp = dsa_port_from_netdev(dev);
+ if (dp->index >= MTK_QDMA_NUM_QUEUES)
+ return NOTIFY_DONE;
+
+ if (mac->speed > 0 && mac->speed <= s.base.speed)
+ s.base.speed = 0;
+
+ mtk_set_queue_speed(eth, dp->index + 3, s.base.speed);
+
+ return NOTIFY_DONE;
+}
+
+static int mtk_open(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i, err;
+
+ err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0);
+ if (err) {
+ netdev_err(dev, "%s: could not attach PHY: %d\n", __func__,
+ err);
+ return err;
+ }
+
+ /* we run 2 netdevs on the same dma ring so we only bring it up once */
+ if (!refcount_read(&eth->dma_refcnt)) {
+ const struct mtk_soc_data *soc = eth->soc;
+ u32 gdm_config;
+ int i;
+
+ err = mtk_start_dma(eth);
+ if (err) {
+ phylink_disconnect_phy(mac->phylink);
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
+ mtk_gdm_config(eth, gdm_config);
+
+ napi_enable(&eth->tx_napi);
+ napi_enable(&eth->rx_napi);
+ mtk_tx_irq_enable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask);
+ refcount_set(&eth->dma_refcnt, 1);
+	} else {
+		refcount_inc(&eth->dma_refcnt);
+	}
+
+ phylink_start(mac->phylink);
+ netif_tx_start_all_queues(dev);
+
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return 0;
+
+ if (mtk_uses_dsa(dev) && !eth->prog) {
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ struct metadata_dst *md_dst = eth->dsa_meta[i];
+
+ if (md_dst)
+ continue;
+
+ md_dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX,
+ GFP_KERNEL);
+ if (!md_dst)
+ return -ENOMEM;
+
+ md_dst->u.port_info.port_id = i;
+ eth->dsa_meta[i] = md_dst;
+ }
+ } else {
+ /* Hardware DSA untagging and VLAN RX offloading need to be
+ * disabled if at least one MAC does not use DSA.
+ */
+ u32 val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+
+ val &= ~MTK_CDMP_STAG_EN;
+ mtk_w32(eth, val, MTK_CDMP_IG_CTRL);
+
+ mtk_w32(eth, 0, MTK_CDMP_EG_CTRL);
+ }
+
+ return 0;
+}
+
+static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg)
+{
+ u32 val;
+ int i;
+
+ /* stop the dma engine */
+ spin_lock_bh(&eth->page_lock);
+ val = mtk_r32(eth, glo_cfg);
+ mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN),
+ glo_cfg);
+ spin_unlock_bh(&eth->page_lock);
+
+ /* wait for dma stop */
+ for (i = 0; i < 10; i++) {
+ val = mtk_r32(eth, glo_cfg);
+ if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) {
+ msleep(20);
+ continue;
+ }
+ break;
+ }
+}
+
+static int mtk_stop(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ int i;
+
+ phylink_stop(mac->phylink);
+
+ netif_tx_disable(dev);
+
+ phylink_disconnect_phy(mac->phylink);
+
+ /* only shutdown DMA if this is the last user */
+ if (!refcount_dec_and_test(&eth->dma_refcnt))
+ return 0;
+
+ mtk_gdm_config(eth, MTK_GDMA_DROP_ALL);
+
+ mtk_tx_irq_disable(eth, MTK_TX_DONE_INT);
+ mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask);
+ napi_disable(&eth->tx_napi);
+ napi_disable(&eth->rx_napi);
+
+ cancel_work_sync(&eth->rx_dim.work);
+ cancel_work_sync(&eth->tx_dim.work);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg);
+ mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg);
+
+ mtk_dma_free(eth);
+
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
+
+ return 0;
+}
+
+static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ struct bpf_prog *old_prog;
+ bool need_update;
+
+ if (eth->hwlro) {
+ NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO");
+ return -EOPNOTSUPP;
+ }
+
+ if (dev->mtu > MTK_PP_MAX_BUF_SIZE) {
+ NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!eth->prog != !!prog;
+ if (netif_running(dev) && need_update)
+ mtk_stop(dev);
+
+ old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held());
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ if (netif_running(dev) && need_update)
+ return mtk_open(dev);
+
+ return 0;
+}
+
+static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp)
+{
+ switch (xdp->command) {
+ case XDP_SETUP_PROG:
+ return mtk_xdp_setup(dev, xdp->prog, xdp->extack);
+ default:
+ return -EINVAL;
+ }
+}
+
+static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits)
+{
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ reset_bits);
+
+ usleep_range(1000, 1100);
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL,
+ reset_bits,
+ ~reset_bits);
+ mdelay(10);
+}
+
+static void mtk_clk_disable(struct mtk_eth *eth)
+{
+ int clk;
+
+ for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--)
+ clk_disable_unprepare(eth->clks[clk]);
+}
+
+static int mtk_clk_enable(struct mtk_eth *eth)
+{
+ int clk, ret;
+
+ for (clk = 0; clk < MTK_CLK_MAX ; clk++) {
+ ret = clk_prepare_enable(eth->clks[clk]);
+ if (ret)
+ goto err_disable_clks;
+ }
+
+ return 0;
+
+err_disable_clks:
+ while (--clk >= 0)
+ clk_disable_unprepare(eth->clks[clk]);
+
+ return ret;
+}
+
+static void mtk_dim_rx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+
+ cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode,
+ dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
+ val &= MTK_PDMA_DELAY_TX_MASK;
+ val |= MTK_PDMA_DELAY_RX_EN;
+
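+	/* Convert the DIM profile to the hardware's 20us delay units and
+	 * clamp both fields to their register widths.
+	 */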
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT;
+
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT;
+
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
+
+ spin_unlock_bh(&eth->dim_lock);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void mtk_dim_tx(struct work_struct *work)
+{
+ struct dim *dim = container_of(work, struct dim, work);
+ struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim);
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ struct dim_cq_moder cur_profile;
+ u32 val, cur;
+
+ cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode,
+ dim->profile_ix);
+ spin_lock_bh(&eth->dim_lock);
+
+ val = mtk_r32(eth, reg_map->pdma.delay_irq);
+ val &= MTK_PDMA_DELAY_RX_MASK;
+ val |= MTK_PDMA_DELAY_TX_EN;
+
+ cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT;
+
+ cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK);
+ val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT;
+
+ mtk_w32(eth, val, reg_map->pdma.delay_irq);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ mtk_w32(eth, val, reg_map->qdma.delay_irq);
+
+ spin_unlock_bh(&eth->dim_lock);
+
+ dim->state = DIM_START_MEASURE;
+}
+
+static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val)
+{
+ struct mtk_eth *eth = mac->hw;
+ u32 mcr_cur, mcr_new;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return;
+
+ mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id));
+ mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK;
+
+ if (val <= 1518)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518);
+ else if (val <= 1536)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536);
+ else if (val <= 1552)
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552);
+ else
+ mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048);
+
+ if (mcr_new != mcr_cur)
+ mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id));
+}
+
+static void mtk_hw_reset(struct mtk_eth *eth)
+{
+ u32 val;
+
+ if (mtk_is_netsys_v2_or_greater(eth))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ val = RSTCTRL_PPE0_V3;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1_V3;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ val |= RSTCTRL_PPE2;
+
+ val |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
+ } else if (mtk_is_netsys_v2_or_greater(eth)) {
+ val = RSTCTRL_PPE0_V2;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= RSTCTRL_PPE1;
+ } else {
+ val = RSTCTRL_PPE0;
+ }
+
+ ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val);
+
+ if (mtk_is_netsys_v3_or_greater(eth))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x6f8ff);
+ else if (mtk_is_netsys_v2_or_greater(eth))
+ regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN,
+ 0x3ffffff);
+}
+
+static u32 mtk_hw_reset_read(struct mtk_eth *eth)
+{
+ u32 val;
+
+ regmap_read(eth->ethsys, ETHSYS_RSTCTRL, &val);
+ return val;
+}
+
+static void mtk_hw_warm_reset(struct mtk_eth *eth)
+{
+ u32 rst_mask, val;
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, RSTCTRL_FE,
+ RSTCTRL_FE);
+ if (readx_poll_timeout_atomic(mtk_hw_reset_read, eth, val,
+ val & RSTCTRL_FE, 1, 1000)) {
+ dev_err(eth->dev, "warm reset failed\n");
+ mtk_hw_reset(eth);
+ return;
+ }
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1_V3;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ rst_mask |= RSTCTRL_PPE2;
+
+ rst_mask |= RSTCTRL_WDMA0 | RSTCTRL_WDMA1 | RSTCTRL_WDMA2;
+ } else if (mtk_is_netsys_v2_or_greater(eth)) {
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0_V2;
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ rst_mask |= RSTCTRL_PPE1;
+ } else {
+ rst_mask = RSTCTRL_ETH | RSTCTRL_PPE0;
+ }
+
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (!(val & rst_mask))
+ dev_err(eth->dev, "warm reset stage0 failed %08x (%08x)\n",
+ val, rst_mask);
+
+ rst_mask |= RSTCTRL_FE;
+ regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, rst_mask, ~rst_mask);
+
+ udelay(1);
+ val = mtk_hw_reset_read(eth);
+ if (val & rst_mask)
+ dev_err(eth->dev, "warm reset stage1 failed %08x (%08x)\n",
+ val, rst_mask);
+}
+
+static bool mtk_hw_check_dma_hang(struct mtk_eth *eth)
+{
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ bool gmac1_tx, gmac2_tx, gdm1_tx, gdm2_tx;
+ bool oq_hang, cdm1_busy, adma_busy;
+ bool wtx_busy, cdm_full, oq_free;
+ u32 wdidx, val, gdm1_fc, gdm2_fc;
+ bool qfsm_hang, qfwd_hang;
+ bool ret = false;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ return false;
+
+ /* WDMA sanity checks */
+ wdidx = mtk_r32(eth, reg_map->wdma_base[0] + 0xc);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x204);
+ wtx_busy = FIELD_GET(MTK_TX_DMA_BUSY, val);
+
+ val = mtk_r32(eth, reg_map->wdma_base[0] + 0x230);
+ cdm_full = !FIELD_GET(MTK_CDM_TXFIFO_RDY, val);
+
+ oq_free = (!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(24, 16)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x4) & GENMASK(8, 0)) &&
+ !(mtk_r32(eth, reg_map->pse_oq_sta + 0x10) & GENMASK(24, 16)));
+
+ if (wdidx == eth->reset.wdidx && wtx_busy && cdm_full && oq_free) {
+ if (++eth->reset.wdma_hang_count > 2) {
+ eth->reset.wdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* QDMA sanity checks */
+ qfsm_hang = !!mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x234);
+ qfwd_hang = !mtk_r32(eth, reg_map->qdma.qtx_cfg + 0x308);
+
+ gdm1_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM1_FSM)) > 0;
+ gdm2_tx = FIELD_GET(GENMASK(31, 16), mtk_r32(eth, MTK_FE_GDM2_FSM)) > 0;
+ gmac1_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(0))) != 1;
+ gmac2_tx = FIELD_GET(GENMASK(31, 24), mtk_r32(eth, MTK_MAC_FSM(1))) != 1;
+ gdm1_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x24);
+ gdm2_fc = mtk_r32(eth, reg_map->gdm1_cnt + 0x64);
+
+ if (qfsm_hang && qfwd_hang &&
+ ((gdm1_tx && gmac1_tx && gdm1_fc < 1) ||
+ (gdm2_tx && gmac2_tx && gdm2_fc < 1))) {
+ if (++eth->reset.qdma_hang_count > 2) {
+ eth->reset.qdma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ /* ADMA sanity checks */
+ oq_hang = !!(mtk_r32(eth, reg_map->pse_oq_sta) & GENMASK(8, 0));
+ cdm1_busy = !!(mtk_r32(eth, MTK_FE_CDM1_FSM) & GENMASK(31, 16));
+ adma_busy = !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & GENMASK(4, 0)) &&
+ !(mtk_r32(eth, reg_map->pdma.adma_rx_dbg0) & BIT(6));
+
+ if (oq_hang && cdm1_busy && adma_busy) {
+ if (++eth->reset.adma_hang_count > 2) {
+ eth->reset.adma_hang_count = 0;
+ ret = true;
+ }
+ goto out;
+ }
+
+ eth->reset.wdma_hang_count = 0;
+ eth->reset.qdma_hang_count = 0;
+ eth->reset.adma_hang_count = 0;
+out:
+ eth->reset.wdidx = wdidx;
+
+ return ret;
+}
+
+static void mtk_hw_reset_monitor_work(struct work_struct *work)
+{
+ struct delayed_work *del_work = to_delayed_work(work);
+ struct mtk_eth *eth = container_of(del_work, struct mtk_eth,
+ reset.monitor_work);
+
+ if (test_bit(MTK_RESETTING, &eth->state))
+ goto out;
+
+ /* DMA stuck checks */
+ if (mtk_hw_check_dma_hang(eth))
+ schedule_work(&eth->pending_work);
+
+out:
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
+}
+
+static int mtk_hw_init(struct mtk_eth *eth, bool reset)
+{
+ u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA |
+ ETHSYS_DMA_AG_MAP_PPE;
+ const struct mtk_reg_map *reg_map = eth->soc->reg_map;
+ int i, val, ret;
+
+ if (!reset && test_and_set_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ if (!reset) {
+ pm_runtime_enable(eth->dev);
+ pm_runtime_get_sync(eth->dev);
+
+ ret = mtk_clk_enable(eth);
+ if (ret)
+ goto err_disable_pm;
+ }
+
+ if (eth->ethsys)
+ regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask,
+ of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ ret = device_reset(eth->dev);
+ if (ret) {
+ dev_err(eth->dev, "MAC reset failed!\n");
+ goto err_disable_pm;
+ }
+
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+ return 0;
+ }
+
+ msleep(100);
+
+ if (reset)
+ mtk_hw_warm_reset(eth);
+ else
+ mtk_hw_reset(eth);
+
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ /* Set FE to PDMAv2 if necessary */
+ val = mtk_r32(eth, MTK_FE_GLO_MISC);
+ mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC);
+ }
+
+ if (eth->pctl) {
+ /* Set GE2 driving and slew rate */
+ regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00);
+
+ /* set GE2 TDSEL */
+ regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5);
+
+ /* set GE2 TUNE */
+ regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0);
+ }
+
+	/* Set link down as the default for each GMAC. Each MAC's own MCR is
+	 * set up with the appropriate value once mtk_mac_config() is invoked.
+	 */
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ struct net_device *dev = eth->netdev[i];
+
+ if (!dev)
+ continue;
+
+ mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i));
+ mtk_set_mcr_max_rx(netdev_priv(dev),
+ dev->mtu + MTK_RX_ETH_HLEN);
+ }
+
+	/* Instruct the CDM to parse the MTK special tag on packets coming
+	 * from the CPU; this also works for untagged packets.
+	 */
+ val = mtk_r32(eth, MTK_CDMQ_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL);
+ if (mtk_is_netsys_v1(eth)) {
+ val = mtk_r32(eth, MTK_CDMP_IG_CTRL);
+ mtk_w32(eth, val | MTK_CDMP_STAG_EN, MTK_CDMP_IG_CTRL);
+
+ mtk_w32(eth, 1, MTK_CDMP_EG_CTRL);
+ }
+
+ /* set interrupt delays based on current Net DIM sample */
+ mtk_dim_rx(&eth->rx_dim.work);
+ mtk_dim_tx(&eth->tx_dim.work);
+
+ /* disable delay and normal interrupt */
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+
+ /* FE int grouping */
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4);
+ mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp);
+ mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4);
+ mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP);
+
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ /* PSE should not drop port1, port8 and port9 packets */
+ mtk_w32(eth, 0x00000302, PSE_DROP_CFG);
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000707, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000077, MTK_CDMW1_THRES);
+
+ /* Disable GDM1 RX CRC stripping */
+ mtk_m32(eth, MTK_GDMA_STRP_CRC, 0, MTK_GDMA_FWD_CFG(0));
+
+		/* The PSE GDM3 MIB counters have incorrect hardware default
+		 * values, so read-clear them here to keep ethtool from
+		 * reporting wrong MIB values.
+		 */
+ for (i = 0; i < 0x80; i += 0x4)
+ mtk_r32(eth, reg_map->gdm1_cnt + 0x100 + i);
+ } else if (!mtk_is_netsys_v1(eth)) {
+ /* PSE should not drop port8 and port9 packets from WDMA Tx */
+ mtk_w32(eth, 0x00000300, PSE_DROP_CFG);
+
+ /* PSE should drop packets to port 8/9 on WDMA Rx ring full */
+ mtk_w32(eth, 0x00000300, PSE_PPE0_DROP);
+
+ /* PSE Free Queue Flow Control */
+ mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2);
+
+ /* PSE config input queue threshold */
+ mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1));
+ mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2));
+ mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7));
+ mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8));
+
+ /* PSE config output queue threshold */
+ mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1));
+ mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2));
+ mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3));
+ mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4));
+ mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5));
+ mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(7));
+ mtk_w32(eth, 0x00060006, PSE_OQ_TH(8));
+
+ /* GDM and CDM Threshold */
+ mtk_w32(eth, 0x00000004, MTK_GDM2_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME0_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDME1_THRES);
+ mtk_w32(eth, 0x00000004, MTK_CDMM_THRES);
+ }
+
+ return 0;
+
+err_disable_pm:
+ if (!reset) {
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+ }
+
+ return ret;
+}
+
+static int mtk_hw_deinit(struct mtk_eth *eth)
+{
+ if (!test_and_clear_bit(MTK_HW_INIT, &eth->state))
+ return 0;
+
+ mtk_clk_disable(eth);
+
+ pm_runtime_put_sync(eth->dev);
+ pm_runtime_disable(eth->dev);
+
+ return 0;
+}
+
+static void mtk_uninit(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ phylink_disconnect_phy(mac->phylink);
+ mtk_tx_irq_disable(eth, ~0);
+ mtk_rx_irq_disable(eth, ~0);
+}
+
+static int mtk_change_mtu(struct net_device *dev, int new_mtu)
+{
+ int length = new_mtu + MTK_RX_ETH_HLEN;
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+
+ if (rcu_access_pointer(eth->prog) &&
+ length > MTK_PP_MAX_BUF_SIZE) {
+ netdev_err(dev, "Invalid MTU for XDP mode\n");
+ return -EINVAL;
+ }
+
+ mtk_set_mcr_max_rx(mac, length);
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ return phylink_mii_ioctl(mac->phylink, ifr, cmd);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static void mtk_prepare_for_reset(struct mtk_eth *eth)
+{
+ u32 val;
+ int i;
+
+ /* set FE PPE ports link down */
+ for (i = MTK_GMAC1_ID;
+ i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
+ i += 2) {
+ val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) | MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val |= MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ val |= MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
+ mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
+ }
+
+ /* adjust PPE configurations to prepare for reset */
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_prepare_reset(eth->ppe[i]);
+
+ /* disable NETSYS interrupts */
+ mtk_w32(eth, 0, MTK_FE_INT_ENABLE);
+
+ /* force link down GMAC */
+ for (i = 0; i < 2; i++) {
+ val = mtk_r32(eth, MTK_MAC_MCR(i)) & ~MAC_MCR_FORCE_LINK;
+ mtk_w32(eth, val, MTK_MAC_MCR(i));
+ }
+}
+
+static void mtk_pending_work(struct work_struct *work)
+{
+ struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work);
+ unsigned long restart = 0;
+ u32 val;
+ int i;
+
+ rtnl_lock();
+ set_bit(MTK_RESETTING, &eth->state);
+
+ mtk_prepare_for_reset(eth);
+ mtk_wed_fe_reset();
+	/* Run the preliminary reset configuration again to avoid any possible
+	 * race during the FE reset, since it can run with the RTNL lock released.
+	 */
+ mtk_prepare_for_reset(eth);
+
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i] || !netif_running(eth->netdev[i]))
+ continue;
+
+ mtk_stop(eth->netdev[i]);
+ __set_bit(i, &restart);
+ }
+
+ usleep_range(15000, 16000);
+
+ if (eth->dev->pins)
+ pinctrl_select_state(eth->dev->pins->p,
+ eth->dev->pins->default_state);
+ mtk_hw_init(eth, true);
+
+ /* restart DMA and enable IRQs */
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i] || !test_bit(i, &restart))
+ continue;
+
+ if (mtk_open(eth->netdev[i])) {
+ netif_alert(eth, ifup, eth->netdev[i],
+ "Driver up/down cycle failed\n");
+ dev_close(eth->netdev[i]);
+ }
+ }
+
+ /* set FE PPE ports link up */
+ for (i = MTK_GMAC1_ID;
+ i <= (mtk_is_netsys_v3_or_greater(eth) ? MTK_GMAC3_ID : MTK_GMAC2_ID);
+ i += 2) {
+ val = mtk_r32(eth, MTK_FE_GLO_CFG(i)) & ~MTK_FE_LINK_DOWN_P(PSE_PPE0_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1))
+ val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE1_PORT);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE2))
+ val &= ~MTK_FE_LINK_DOWN_P(PSE_PPE2_PORT);
+
+ mtk_w32(eth, val, MTK_FE_GLO_CFG(i));
+ }
+
+ clear_bit(MTK_RESETTING, &eth->state);
+
+ mtk_wed_fe_reset_complete();
+
+ rtnl_unlock();
+}
+
+static int mtk_free_dev(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+ free_netdev(eth->netdev[i]);
+ }
+
+ for (i = 0; i < ARRAY_SIZE(eth->dsa_meta); i++) {
+ if (!eth->dsa_meta[i])
+ break;
+ metadata_dst_free(eth->dsa_meta[i]);
+ }
+
+ return 0;
+}
+
+static int mtk_unreg_dev(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ struct mtk_mac *mac;
+ if (!eth->netdev[i])
+ continue;
+ mac = netdev_priv(eth->netdev[i]);
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ unregister_netdevice_notifier(&mac->device_notifier);
+ unregister_netdev(eth->netdev[i]);
+ }
+
+ return 0;
+}
+
+static void mtk_sgmii_destroy(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < MTK_MAX_DEVS; i++)
+ mtk_pcs_lynxi_destroy(eth->sgmii_pcs[i]);
+}
+
+static int mtk_cleanup(struct mtk_eth *eth)
+{
+ mtk_sgmii_destroy(eth);
+ mtk_unreg_dev(eth);
+ mtk_free_dev(eth);
+ cancel_work_sync(&eth->pending_work);
+ cancel_delayed_work_sync(&eth->reset.monitor_work);
+
+ return 0;
+}
+
+static int mtk_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(ndev);
+
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ return phylink_ethtool_ksettings_get(mac->phylink, cmd);
+}
+
+static int mtk_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct mtk_mac *mac = netdev_priv(ndev);
+
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ return phylink_ethtool_ksettings_set(mac->phylink, cmd);
+}
+
+static void mtk_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info));
+ info->n_stats = ARRAY_SIZE(mtk_ethtool_stats);
+}
+
+static u32 mtk_get_msglevel(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ return mac->hw->msg_enable;
+}
+
+static void mtk_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ mac->hw->msg_enable = value;
+}
+
+static int mtk_nway_reset(struct net_device *dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return -EBUSY;
+
+ if (!mac->phylink)
+ return -ENOTSUPP;
+
+ return phylink_ethtool_nway_reset(mac->phylink);
+}
+
+static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS: {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) {
+ memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ if (mtk_page_pool_enabled(mac->hw))
+ page_pool_ethtool_stats_get_strings(data);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static int mtk_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS: {
+ int count = ARRAY_SIZE(mtk_ethtool_stats);
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ if (mtk_page_pool_enabled(mac->hw))
+ count += page_pool_ethtool_stats_get_count();
+ return count;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data)
+{
+ struct page_pool_stats stats = {};
+ int i;
+
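+	/* Aggregate page_pool statistics from every RX ring into one snapshot */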
+ for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) {
+ struct mtk_rx_ring *ring = &eth->rx_ring[i];
+
+ if (!ring->page_pool)
+ continue;
+
+ page_pool_get_stats(ring->page_pool, &stats);
+ }
+ page_pool_ethtool_stats_get(data, &stats);
+}
+
+static void mtk_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_hw_stats *hwstats = mac->hw_stats;
+ u64 *data_src, *data_dst;
+ unsigned int start;
+ int i;
+
+ if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state)))
+ return;
+
+ if (netif_running(dev) && netif_device_present(dev)) {
+ if (spin_trylock_bh(&hwstats->stats_lock)) {
+ mtk_stats_update_mac(mac);
+ spin_unlock_bh(&hwstats->stats_lock);
+ }
+ }
+
+ data_src = (u64 *)hwstats;
+
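+	/* Retry the copy if the 64-bit counters were updated concurrently */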
+ do {
+ data_dst = data;
+ start = u64_stats_fetch_begin(&hwstats->syncp);
+
+ for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++)
+ *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset);
+ if (mtk_page_pool_enabled(mac->hw))
+ mtk_ethtool_pp_stats(mac->hw, data_dst);
+ } while (u64_stats_fetch_retry(&hwstats->syncp, start));
+}
+
+static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ if (dev->hw_features & NETIF_F_LRO) {
+ cmd->data = MTK_MAX_RX_RING_NUM;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRLCNT:
+ if (dev->hw_features & NETIF_F_LRO) {
+ struct mtk_mac *mac = netdev_priv(dev);
+
+ cmd->rule_cnt = mac->hwlro_ip_cnt;
+ ret = 0;
+ }
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ if (dev->hw_features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_entry(dev, cmd);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ if (dev->hw_features & NETIF_F_LRO)
+ ret = mtk_hwlro_get_fdir_all(dev, cmd,
+ rule_locs);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
+{
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ if (dev->hw_features & NETIF_F_LRO)
+ ret = mtk_hwlro_add_ipaddr(dev, cmd);
+ break;
+ case ETHTOOL_SRXCLSRLDEL:
+ if (dev->hw_features & NETIF_F_LRO)
+ ret = mtk_hwlro_del_ipaddr(dev, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ unsigned int queue = 0;
+
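+	/* GMACs transmit on the queue matching their MAC id; DSA user ports
+	 * are mapped to dedicated queues starting at index 3.
+	 */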
+ if (netdev_uses_dsa(dev))
+ queue = skb_get_queue_mapping(skb) + 3;
+ else
+ queue = mac->id;
+
+ if (queue >= dev->num_tx_queues)
+ queue = 0;
+
+ return queue;
+}
+
+static const struct ethtool_ops mtk_ethtool_ops = {
+ .get_link_ksettings = mtk_get_link_ksettings,
+ .set_link_ksettings = mtk_set_link_ksettings,
+ .get_drvinfo = mtk_get_drvinfo,
+ .get_msglevel = mtk_get_msglevel,
+ .set_msglevel = mtk_set_msglevel,
+ .nway_reset = mtk_nway_reset,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mtk_get_strings,
+ .get_sset_count = mtk_get_sset_count,
+ .get_ethtool_stats = mtk_get_ethtool_stats,
+ .get_rxnfc = mtk_get_rxnfc,
+ .set_rxnfc = mtk_set_rxnfc,
+};
+
+static const struct net_device_ops mtk_netdev_ops = {
+ .ndo_uninit = mtk_uninit,
+ .ndo_open = mtk_open,
+ .ndo_stop = mtk_stop,
+ .ndo_start_xmit = mtk_start_xmit,
+ .ndo_set_mac_address = mtk_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_eth_ioctl = mtk_do_ioctl,
+ .ndo_change_mtu = mtk_change_mtu,
+ .ndo_tx_timeout = mtk_tx_timeout,
+ .ndo_get_stats64 = mtk_get_stats64,
+ .ndo_fix_features = mtk_fix_features,
+ .ndo_set_features = mtk_set_features,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = mtk_poll_controller,
+#endif
+ .ndo_setup_tc = mtk_eth_setup_tc,
+ .ndo_bpf = mtk_xdp,
+ .ndo_xdp_xmit = mtk_xdp_xmit,
+ .ndo_select_queue = mtk_select_queue,
+};
+
+static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
+{
+ const __be32 *_id = of_get_property(np, "reg", NULL);
+ phy_interface_t phy_mode;
+ struct phylink *phylink;
+ struct mtk_mac *mac;
+ int id, err;
+ int txqs = 1;
+ u32 val;
+
+ if (!_id) {
+ dev_err(eth->dev, "missing mac id\n");
+ return -EINVAL;
+ }
+
+ id = be32_to_cpup(_id);
+ if (id >= MTK_MAX_DEVS) {
+ dev_err(eth->dev, "%d is not a valid mac id\n", id);
+ return -EINVAL;
+ }
+
+ if (eth->netdev[id]) {
+ dev_err(eth->dev, "duplicate mac id found: %d\n", id);
+ return -EINVAL;
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA))
+ txqs = MTK_QDMA_NUM_QUEUES;
+
+ eth->netdev[id] = alloc_etherdev_mqs(sizeof(*mac), txqs, 1);
+ if (!eth->netdev[id]) {
+ dev_err(eth->dev, "alloc_etherdev failed\n");
+ return -ENOMEM;
+ }
+ mac = netdev_priv(eth->netdev[id]);
+ eth->mac[id] = mac;
+ mac->id = id;
+ mac->hw = eth;
+ mac->of_node = np;
+
+ err = of_get_ethdev_address(mac->of_node, eth->netdev[id]);
+ if (err == -EPROBE_DEFER)
+ return err;
+
+ if (err) {
+ /* If the mac address is invalid, use random mac address */
+ eth_hw_addr_random(eth->netdev[id]);
+ dev_err(eth->dev, "generated random MAC address %pM\n",
+ eth->netdev[id]->dev_addr);
+ }
+
+ memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip));
+ mac->hwlro_ip_cnt = 0;
+
+ mac->hw_stats = devm_kzalloc(eth->dev,
+ sizeof(*mac->hw_stats),
+ GFP_KERNEL);
+ if (!mac->hw_stats) {
+ dev_err(eth->dev, "failed to allocate counter memory\n");
+ err = -ENOMEM;
+ goto free_netdev;
+ }
+ spin_lock_init(&mac->hw_stats->stats_lock);
+ u64_stats_init(&mac->hw_stats->syncp);
+
+ if (mtk_is_netsys_v3_or_greater(eth))
+ mac->hw_stats->reg_offset = id * 0x80;
+ else
+ mac->hw_stats->reg_offset = id * 0x40;
+
+ /* phylink create */
+ err = of_get_phy_mode(np, &phy_mode);
+ if (err) {
+ dev_err(eth->dev, "incorrect phy-mode\n");
+ goto free_netdev;
+ }
+
+ /* mac config is not set */
+ mac->interface = PHY_INTERFACE_MODE_NA;
+ mac->speed = SPEED_UNKNOWN;
+
+ mac->phylink_config.dev = &eth->netdev[id]->dev;
+ mac->phylink_config.type = PHYLINK_NETDEV;
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD;
+
+	/* MT7623 gmac0 is missing its speed-specific PLL configuration in its
+	 * .mac_config method (since state->speed is not valid there), so disable
+	 * support for MII, GMII and RGMII.
+	 */
+ if (!mac->hw->soc->disable_pll_modes || mac->id != 0) {
+ __set_bit(PHY_INTERFACE_MODE_MII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_GMII,
+ mac->phylink_config.supported_interfaces);
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII))
+ phy_interface_set_rgmii(mac->phylink_config.supported_interfaces);
+ }
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id)
+ __set_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+
+ /* TRGMII is not permitted on MT7621 if using DDR2 */
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII_MT7621_CLK)) {
+ regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val);
+ if (val & SYSCFG_DRAM_TYPE_DDR2)
+ __clear_bit(PHY_INTERFACE_MODE_TRGMII,
+ mac->phylink_config.supported_interfaces);
+ }
+
+ if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) {
+ __set_bit(PHY_INTERFACE_MODE_SGMII,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_1000BASEX,
+ mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_2500BASEX,
+ mac->phylink_config.supported_interfaces);
+ }
+
+ if (mtk_is_netsys_v3_or_greater(mac->hw) &&
+ MTK_HAS_CAPS(mac->hw->soc->caps, MTK_ESW_BIT) &&
+ id == MTK_GMAC1_ID) {
+ mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
+ MAC_SYM_PAUSE |
+ MAC_10000FD;
+ phy_interface_zero(mac->phylink_config.supported_interfaces);
+ __set_bit(PHY_INTERFACE_MODE_INTERNAL,
+ mac->phylink_config.supported_interfaces);
+ }
+
+ phylink = phylink_create(&mac->phylink_config,
+ of_fwnode_handle(mac->of_node),
+ phy_mode, &mtk_phylink_ops);
+ if (IS_ERR(phylink)) {
+ err = PTR_ERR(phylink);
+ goto free_netdev;
+ }
+
+ mac->phylink = phylink;
+
+ SET_NETDEV_DEV(eth->netdev[id], eth->dev);
+ eth->netdev[id]->watchdog_timeo = 5 * HZ;
+ eth->netdev[id]->netdev_ops = &mtk_netdev_ops;
+ eth->netdev[id]->base_addr = (unsigned long)eth->base;
+
+ eth->netdev[id]->hw_features = eth->soc->hw_features;
+ if (eth->hwlro)
+ eth->netdev[id]->hw_features |= NETIF_F_LRO;
+
+ eth->netdev[id]->vlan_features = eth->soc->hw_features &
+ ~NETIF_F_HW_VLAN_CTAG_TX;
+ eth->netdev[id]->features |= eth->soc->hw_features;
+ eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops;
+
+ eth->netdev[id]->irq = eth->irq[0];
+ eth->netdev[id]->dev.of_node = np;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
+ else
+ eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) {
+ mac->device_notifier.notifier_call = mtk_device_event;
+ register_netdevice_notifier(&mac->device_notifier);
+ }
+
+ if (mtk_page_pool_enabled(eth))
+ eth->netdev[id]->xdp_features = NETDEV_XDP_ACT_BASIC |
+ NETDEV_XDP_ACT_REDIRECT |
+ NETDEV_XDP_ACT_NDO_XMIT |
+ NETDEV_XDP_ACT_NDO_XMIT_SG;
+
+ return 0;
+
+free_netdev:
+ free_netdev(eth->netdev[id]);
+ return err;
+}
+
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev)
+{
+ struct net_device *dev, *tmp;
+ LIST_HEAD(dev_list);
+ int i;
+
+ rtnl_lock();
+
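+	/* close every running netdev before switching the DMA device */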
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ dev = eth->netdev[i];
+
+ if (!dev || !(dev->flags & IFF_UP))
+ continue;
+
+ list_add_tail(&dev->close_list, &dev_list);
+ }
+
+ dev_close_many(&dev_list, false);
+
+ eth->dma_dev = dma_dev;
+
+ list_for_each_entry_safe(dev, tmp, &dev_list, close_list) {
+ list_del_init(&dev->close_list);
+ dev_open(dev, NULL);
+ }
+
+ rtnl_unlock();
+}
+
+static int mtk_sgmii_init(struct mtk_eth *eth)
+{
+ struct device_node *np;
+ struct regmap *regmap;
+ u32 flags;
+ int i;
+
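+	/* create one LynxI PCS instance per "mediatek,sgmiisys" phandle */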
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ np = of_parse_phandle(eth->dev->of_node, "mediatek,sgmiisys", i);
+ if (!np)
+ break;
+
+ regmap = syscon_node_to_regmap(np);
+ flags = 0;
+ if (of_property_read_bool(np, "mediatek,pnswap"))
+ flags |= MTK_SGMII_FLAG_PN_SWAP;
+
+ of_node_put(np);
+
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ eth->sgmii_pcs[i] = mtk_pcs_lynxi_create(eth->dev, regmap,
+ eth->soc->ana_rgc3,
+ flags);
+ }
+
+ return 0;
+}
+
+static int mtk_probe(struct platform_device *pdev)
+{
+ struct resource *res = NULL, *res_sram;
+ struct device_node *mac_np;
+ struct mtk_eth *eth;
+ int err, i;
+
+ eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL);
+ if (!eth)
+ return -ENOMEM;
+
+ eth->soc = of_device_get_match_data(&pdev->dev);
+
+ eth->dev = &pdev->dev;
+ eth->dma_dev = &pdev->dev;
+ eth->base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(eth->base))
+ return PTR_ERR(eth->base);
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+ eth->ip_align = NET_IP_ALIGN;
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
+ /* SRAM is actual memory and supports transparent access just like DRAM.
+		 * Hence we don't require __iomem to be set and don't need to use accessor
+ * functions to read from or write to SRAM.
+ */
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ eth->sram_base = (void __force *)devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(eth->sram_base))
+ return PTR_ERR(eth->sram_base);
+ } else {
+ eth->sram_base = (void __force *)eth->base + MTK_ETH_SRAM_OFFSET;
+ }
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_36BIT_DMA)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36));
+ if (err) {
+ dev_err(&pdev->dev, "Wrong DMA config\n");
+ return -EINVAL;
+ }
+ }
+
+ spin_lock_init(&eth->page_lock);
+ spin_lock_init(&eth->tx_irq_lock);
+ spin_lock_init(&eth->rx_irq_lock);
+ spin_lock_init(&eth->dim_lock);
+
+ eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->rx_dim.work, mtk_dim_rx);
+ INIT_DELAYED_WORK(&eth->reset.monitor_work, mtk_hw_reset_monitor_work);
+
+ eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
+ INIT_WORK(&eth->tx_dim.work, mtk_dim_tx);
+
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,ethsys");
+ if (IS_ERR(eth->ethsys)) {
+ dev_err(&pdev->dev, "no ethsys regmap found\n");
+ return PTR_ERR(eth->ethsys);
+ }
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) {
+ eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,infracfg");
+ if (IS_ERR(eth->infra)) {
+ dev_err(&pdev->dev, "no infracfg regmap found\n");
+ return PTR_ERR(eth->infra);
+ }
+ }
+
+ if (of_dma_is_coherent(pdev->dev.of_node)) {
+ struct regmap *cci;
+
+ cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "cci-control-port");
+ /* enable CPU/bus coherency */
+ if (!IS_ERR(cci))
+ regmap_write(cci, 0, 3);
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) {
+ err = mtk_sgmii_init(eth);
+
+ if (err)
+ return err;
+ }
+
+ if (eth->soc->required_pctl) {
+ eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "mediatek,pctl");
+ if (IS_ERR(eth->pctl)) {
+ dev_err(&pdev->dev, "no pctl regmap found\n");
+ err = PTR_ERR(eth->pctl);
+ goto err_destroy_sgmii;
+ }
+ }
+
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ err = -EINVAL;
+ goto err_destroy_sgmii;
+ }
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SRAM)) {
+ if (mtk_is_netsys_v3_or_greater(eth)) {
+ res_sram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res_sram) {
+ err = -EINVAL;
+ goto err_destroy_sgmii;
+ }
+ eth->phy_scratch_ring = res_sram->start;
+ } else {
+ eth->phy_scratch_ring = res->start + MTK_ETH_SRAM_OFFSET;
+ }
+ }
+ }
+
+ if (eth->soc->offload_version) {
+ for (i = 0;; i++) {
+ struct device_node *np;
+ phys_addr_t wdma_phy;
+ u32 wdma_base;
+
+ if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base))
+ break;
+
+ np = of_parse_phandle(pdev->dev.of_node,
+ "mediatek,wed", i);
+ if (!np)
+ break;
+
+ wdma_base = eth->soc->reg_map->wdma_base[i];
+ wdma_phy = res ? res->start + wdma_base : 0;
+ mtk_wed_add_hw(np, eth, eth->base + wdma_base,
+ wdma_phy, i);
+ }
+ }
+
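+	/* SoCs with a shared interrupt line reuse IRQ 0 for TX and RX */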
+ for (i = 0; i < 3; i++) {
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0)
+ eth->irq[i] = eth->irq[0];
+ else
+ eth->irq[i] = platform_get_irq(pdev, i);
+ if (eth->irq[i] < 0) {
+ dev_err(&pdev->dev, "no IRQ%d resource found\n", i);
+ err = -ENXIO;
+ goto err_wed_exit;
+ }
+ }
+ for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+ eth->clks[i] = devm_clk_get(eth->dev,
+ mtk_clks_source_name[i]);
+ if (IS_ERR(eth->clks[i])) {
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) {
+ err = -EPROBE_DEFER;
+ goto err_wed_exit;
+ }
+ if (eth->soc->required_clks & BIT(i)) {
+ dev_err(&pdev->dev, "clock %s not found\n",
+ mtk_clks_source_name[i]);
+ err = -EINVAL;
+ goto err_wed_exit;
+ }
+ eth->clks[i] = NULL;
+ }
+ }
+
+ eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
+ INIT_WORK(&eth->pending_work, mtk_pending_work);
+
+ err = mtk_hw_init(eth, false);
+ if (err)
+ goto err_wed_exit;
+
+ eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO);
+
+ for_each_child_of_node(pdev->dev.of_node, mac_np) {
+ if (!of_device_is_compatible(mac_np,
+ "mediatek,eth-mac"))
+ continue;
+
+ if (!of_device_is_available(mac_np))
+ continue;
+
+ err = mtk_add_mac(eth, mac_np);
+ if (err) {
+ of_node_put(mac_np);
+ goto err_deinit_hw;
+ }
+ }
+
+ if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) {
+ err = devm_request_irq(eth->dev, eth->irq[0],
+ mtk_handle_irq, 0,
+ dev_name(eth->dev), eth);
+ } else {
+ err = devm_request_irq(eth->dev, eth->irq[1],
+ mtk_handle_irq_tx, 0,
+ dev_name(eth->dev), eth);
+ if (err)
+ goto err_free_dev;
+
+ err = devm_request_irq(eth->dev, eth->irq[2],
+ mtk_handle_irq_rx, 0,
+ dev_name(eth->dev), eth);
+ }
+ if (err)
+ goto err_free_dev;
+
+ /* No MT7628/88 support yet */
+ if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) {
+ err = mtk_mdio_init(eth);
+ if (err)
+ goto err_free_dev;
+ }
+
+ if (eth->soc->offload_version) {
+ u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1;
+
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i);
+
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_deinit_ppe;
+ }
+ }
+
+ err = mtk_eth_offload_init(eth);
+ if (err)
+ goto err_deinit_ppe;
+ }
+
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+
+ err = register_netdev(eth->netdev[i]);
+ if (err) {
+ dev_err(eth->dev, "error bringing up device\n");
+ goto err_deinit_ppe;
+ } else
+ netif_info(eth, probe, eth->netdev[i],
+ "mediatek frame engine at 0x%08lx, irq %d\n",
+ eth->netdev[i]->base_addr, eth->irq[0]);
+ }
+
+ /* we run 2 devices on the same DMA ring so we need a dummy device
+ * for NAPI to work
+ */
+ init_dummy_netdev(&eth->dummy_dev);
+ netif_napi_add(&eth->dummy_dev, &eth->tx_napi, mtk_napi_tx);
+ netif_napi_add(&eth->dummy_dev, &eth->rx_napi, mtk_napi_rx);
+
+ platform_set_drvdata(pdev, eth);
+ schedule_delayed_work(&eth->reset.monitor_work,
+ MTK_DMA_MONITOR_TIMEOUT);
+
+ return 0;
+
+err_deinit_ppe:
+ mtk_ppe_deinit(eth);
+ mtk_mdio_cleanup(eth);
+err_free_dev:
+ mtk_free_dev(eth);
+err_deinit_hw:
+ mtk_hw_deinit(eth);
+err_wed_exit:
+ mtk_wed_exit();
+err_destroy_sgmii:
+ mtk_sgmii_destroy(eth);
+
+ return err;
+}
+
+static int mtk_remove(struct platform_device *pdev)
+{
+ struct mtk_eth *eth = platform_get_drvdata(pdev);
+ struct mtk_mac *mac;
+ int i;
+
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAX_DEVS; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ mac = netdev_priv(eth->netdev[i]);
+ phylink_disconnect_phy(mac->phylink);
+ }
+
+ mtk_wed_exit();
+ mtk_hw_deinit(eth);
+
+ netif_napi_del(&eth->tx_napi);
+ netif_napi_del(&eth->rx_napi);
+ mtk_cleanup(eth);
+ mtk_mdio_cleanup(eth);
+
+ return 0;
+}
+
+static const struct mtk_soc_data mt2701_data = {
+ .reg_map = &mtk_reg_map,
+ .caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
+ .version = 1,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7621_data = {
+ .reg_map = &mtk_reg_map,
+ .caps = MT7621_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7621_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 1,
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7622_data = {
+ .reg_map = &mtk_reg_map,
+ .ana_rgc3 = 0x2028,
+ .caps = MT7622_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7622_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 1,
+ .offload_version = 2,
+ .hash_offset = 2,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7623_data = {
+ .reg_map = &mtk_reg_map,
+ .caps = MT7623_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7623_CLKS_BITMAP,
+ .required_pctl = true,
+ .version = 1,
+ .offload_version = 1,
+ .hash_offset = 2,
+ .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE,
+ .disable_pll_modes = true,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7629_data = {
+ .reg_map = &mtk_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7629_CAPS | MTK_HWLRO,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7629_CLKS_BITMAP,
+ .required_pctl = false,
+ .has_accounting = true,
+ .version = 1,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+static const struct mtk_soc_data mt7981_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7981_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7981_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 2,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
+static const struct mtk_soc_data mt7986_data = {
+ .reg_map = &mt7986_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7986_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7986_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 2,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
+static const struct mtk_soc_data mt7988_data = {
+ .reg_map = &mt7988_reg_map,
+ .ana_rgc3 = 0x128,
+ .caps = MT7988_CAPS,
+ .hw_features = MTK_HW_FEATURES,
+ .required_clks = MT7988_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 3,
+ .offload_version = 2,
+ .hash_offset = 4,
+ .has_accounting = true,
+ .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma_v2),
+ .rxd_size = sizeof(struct mtk_rx_dma_v2),
+ .rx_irq_done_mask = MTK_RX_DONE_INT_V2,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_V2,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN_V2,
+ .dma_len_offset = 8,
+ },
+};
+
+static const struct mtk_soc_data rt5350_data = {
+ .reg_map = &mt7628_reg_map,
+ .caps = MT7628_CAPS,
+ .hw_features = MTK_HW_FEATURES_MT7628,
+ .required_clks = MT7628_CLKS_BITMAP,
+ .required_pctl = false,
+ .version = 1,
+ .txrx = {
+ .txd_size = sizeof(struct mtk_tx_dma),
+ .rxd_size = sizeof(struct mtk_rx_dma),
+ .rx_irq_done_mask = MTK_RX_DONE_INT,
+ .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA,
+ .dma_max_len = MTK_TX_DMA_BUF_LEN,
+ .dma_len_offset = 16,
+ },
+};
+
+const struct of_device_id of_mtk_match[] = {
+ { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data },
+ { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data },
+ { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data },
+ { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data },
+ { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data },
+ { .compatible = "mediatek,mt7981-eth", .data = &mt7981_data },
+ { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data },
+ { .compatible = "mediatek,mt7988-eth", .data = &mt7988_data },
+ { .compatible = "ralink,rt5350-eth", .data = &rt5350_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
+
+static struct platform_driver mtk_driver = {
+ .probe = mtk_probe,
+ .remove = mtk_remove,
+ .driver = {
+ .name = "mtk_soc_eth",
+ .of_match_table = of_mtk_match,
+ },
+};
+
+module_platform_driver(mtk_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC");
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
new file mode 100644
index 0000000000..403219d987
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -0,0 +1,1444 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *
+ * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org>
+ * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com>
+ */
+
+#ifndef MTK_ETH_H
+#define MTK_ETH_H
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/of_net.h>
+#include <linux/u64_stats_sync.h>
+#include <linux/refcount.h>
+#include <linux/phylink.h>
+#include <linux/rhashtable.h>
+#include <linux/dim.h>
+#include <linux/bitfield.h>
+#include <net/page_pool/types.h>
+#include <linux/bpf_trace.h>
+#include "mtk_ppe.h"
+
+#define MTK_MAX_DSA_PORTS 7
+#define MTK_DSA_PORT_MASK GENMASK(2, 0)
+
+#define MTK_QDMA_NUM_QUEUES 16
+#define MTK_QDMA_PAGE_SIZE 2048
+#define MTK_MAX_RX_LENGTH 1536
+#define MTK_MAX_RX_LENGTH_2K 2048
+#define MTK_TX_DMA_BUF_LEN 0x3fff
+#define MTK_TX_DMA_BUF_LEN_V2 0xffff
+#define MTK_QDMA_RING_SIZE 2048
+#define MTK_DMA_SIZE 512
+#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN)
+#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN)
+#define MTK_DMA_DUMMY_DESC 0xffffffff
+#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR)
+#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \
+ NETIF_F_RXCSUM | \
+ NETIF_F_HW_VLAN_CTAG_TX | \
+ NETIF_F_SG | NETIF_F_TSO | \
+ NETIF_F_TSO6 | \
+ NETIF_F_IPV6_CSUM |\
+ NETIF_F_HW_TC)
+#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM)
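+/* next descriptor index; assumes the ring size (Y) is a power of two */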
+#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1))
+
+#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM
+#define MTK_PP_PAD (MTK_PP_HEADROOM + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
+#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD)
+
+#define MTK_QRX_OFFSET 0x10
+
+#define MTK_MAX_RX_RING_NUM 4
+#define MTK_HW_LRO_DMA_SIZE 8
+
+#define MTK_MAX_LRO_RX_LENGTH (4096 * 3)
+#define MTK_MAX_LRO_IP_CNT 2
+#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */
+#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */
+#define MTK_HW_LRO_AGG_TIME 10 /* 200us */
+#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */
+#define MTK_HW_LRO_MAX_AGG_CNT 64
+#define MTK_HW_LRO_BW_THRE 3000
+#define MTK_HW_LRO_REPLACE_DELTA 1000
+#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522
+
+/* Frame Engine Global Configuration */
+#define MTK_FE_GLO_CFG(x) (((x) == MTK_GMAC3_ID) ? 0x24 : 0x00)
+#define MTK_FE_LINK_DOWN_P(x) BIT(((x) + 8) % 16)
+
+/* Frame Engine Global Reset Register */
+#define MTK_RST_GL 0x04
+#define RST_GL_PSE BIT(0)
+
+/* Frame Engine Interrupt Status Register */
+#define MTK_INT_STATUS2 0x08
+#define MTK_FE_INT_ENABLE 0x0c
+#define MTK_FE_INT_FQ_EMPTY BIT(8)
+#define MTK_FE_INT_TSO_FAIL BIT(12)
+#define MTK_FE_INT_TSO_ILLEGAL BIT(13)
+#define MTK_FE_INT_TSO_ALIGN BIT(14)
+#define MTK_FE_INT_RFIFO_OV BIT(18)
+#define MTK_FE_INT_RFIFO_UF BIT(19)
+#define MTK_GDM1_AF BIT(28)
+#define MTK_GDM2_AF BIT(29)
+
+/* PDMA HW LRO Alter Flow Timer Register */
+#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c
+
+/* Frame Engine Interrupt Grouping Register */
+#define MTK_FE_INT_GRP 0x20
+
+/* CDMQ Ingress Control Register */
+#define MTK_CDMQ_IG_CTRL 0x1400
+#define MTK_CDMQ_STAG_EN BIT(0)
+
+/* CDMQ Egress Control Register */
+#define MTK_CDMQ_EG_CTRL 0x1404
+
+/* CDMP Ingress Control Register */
+#define MTK_CDMP_IG_CTRL 0x400
+#define MTK_CDMP_STAG_EN BIT(0)
+
+/* CDMP Egress Control Register */
+#define MTK_CDMP_EG_CTRL 0x404
+
+/* GDM Forwarding Control Register */
+#define MTK_GDMA_FWD_CFG(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x540 : 0x500 + (_x * 0x1000); })
+#define MTK_GDMA_SPECIAL_TAG BIT(24)
+#define MTK_GDMA_ICS_EN BIT(22)
+#define MTK_GDMA_TCS_EN BIT(21)
+#define MTK_GDMA_UCS_EN BIT(20)
+#define MTK_GDMA_STRP_CRC BIT(16)
+#define MTK_GDMA_TO_PDMA 0x0
+#define MTK_GDMA_DROP_ALL 0x7777
+
+/* GDM Egress Control Register */
+#define MTK_GDMA_EG_CTRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x544 : 0x504 + (_x * 0x1000); })
+#define MTK_GDMA_XGDM_SEL BIT(31)
+
+/* Unicast Filter MAC Address Register - Low */
+#define MTK_GDMA_MAC_ADRL(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x548 : 0x508 + (_x * 0x1000); })
+
+/* Unicast Filter MAC Address Register - High */
+#define MTK_GDMA_MAC_ADRH(x) ({ typeof(x) _x = (x); (_x == MTK_GMAC3_ID) ? \
+ 0x54C : 0x50C + (_x * 0x1000); })
+
+/* Internal SRAM offset */
+#define MTK_ETH_SRAM_OFFSET 0x40000
+
+/* FE global misc reg */
+#define MTK_FE_GLO_MISC 0x124
+
+/* PSE Free Queue Flow Control */
+#define PSE_FQFC_CFG1 0x100
+#define PSE_FQFC_CFG2 0x104
+#define PSE_DROP_CFG 0x108
+#define PSE_PPE0_DROP 0x110
+
+/* PSE Input Queue Reservation Register */
+#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2))
+
+/* PSE Output Queue Threshold Register */
+#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2))
+
+/* GDM and CDM Threshold */
+#define MTK_GDM2_THRES 0x1530
+#define MTK_CDMW0_THRES 0x164c
+#define MTK_CDMW1_THRES 0x1650
+#define MTK_CDME0_THRES 0x1654
+#define MTK_CDME1_THRES 0x1658
+#define MTK_CDMM_THRES 0x165c
+
+/* PDMA HW LRO Control Registers */
+#define MTK_PDMA_LRO_CTRL_DW0 0x980
+#define MTK_LRO_EN BIT(0)
+#define MTK_L3_CKS_UPD_EN BIT(7)
+#define MTK_L3_CKS_UPD_EN_V2 BIT(19)
+#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21)
+#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26)
+#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24)
+#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29)
+#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28)
+
+#define MTK_PDMA_LRO_CTRL_DW1 0x984
+#define MTK_PDMA_LRO_CTRL_DW2 0x988
+#define MTK_PDMA_LRO_CTRL_DW3 0x98c
+#define MTK_ADMA_MODE BIT(15)
+#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16)
+
+#define MTK_RX_DMA_LRO_EN BIT(8)
+#define MTK_MULTI_EN BIT(10)
+#define MTK_PDMA_SIZE_8DWORDS (1 << 4)
+
+/* PDMA Global Configuration Register */
+#define MTK_PDMA_LRO_SDL 0x3000
+#define MTK_RX_CFG_SDL_OFFSET 16
+
+/* PDMA Reset Index Register */
+#define MTK_PST_DRX_IDX0 BIT(16)
+#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x))
+
+/* PDMA Delay Interrupt Register */
+#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0)
+#define MTK_PDMA_DELAY_RX_EN BIT(15)
+#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8
+#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0
+
+#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16)
+#define MTK_PDMA_DELAY_TX_EN BIT(31)
+#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24
+#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16
+
+#define MTK_PDMA_DELAY_PINT_MASK 0x7f
+#define MTK_PDMA_DELAY_PTIME_MASK 0xff
+
+/* PDMA HW LRO Alter Flow Delta Register */
+#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c
+
+/* PDMA HW LRO IP Setting Registers */
+#define MTK_LRO_RX_RING0_DIP_DW0 0xb04
+#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40))
+#define MTK_RING_MYIP_VLD BIT(9)
+
+/* PDMA HW LRO Ring Control Registers */
+#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28
+#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c
+#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30
+#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40))
+#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40))
+#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40))
+#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22)
+#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f)
+#define MTK_RING_AUTO_LERAN_MODE (3 << 6)
+#define MTK_RING_VLD BIT(8)
+#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10)
+#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26)
+#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3)
+
+/* QDMA TX Queue Configuration Registers */
+#define MTK_QTX_OFFSET 0x10
+#define QDMA_RES_THRES 4
+
+/* QDMA Tx Queue Scheduler Configuration Registers */
+#define MTK_QTX_SCH_TX_SEL BIT(31)
+#define MTK_QTX_SCH_TX_SEL_V2 GENMASK(31, 30)
+
+#define MTK_QTX_SCH_LEAKY_BUCKET_EN BIT(30)
+#define MTK_QTX_SCH_LEAKY_BUCKET_SIZE GENMASK(29, 28)
+#define MTK_QTX_SCH_MIN_RATE_EN BIT(27)
+#define MTK_QTX_SCH_MIN_RATE_MAN GENMASK(26, 20)
+#define MTK_QTX_SCH_MIN_RATE_EXP GENMASK(19, 16)
+#define MTK_QTX_SCH_MAX_RATE_WEIGHT GENMASK(15, 12)
+#define MTK_QTX_SCH_MAX_RATE_EN BIT(11)
+#define MTK_QTX_SCH_MAX_RATE_MAN GENMASK(10, 4)
+#define MTK_QTX_SCH_MAX_RATE_EXP GENMASK(3, 0)
+
+/* QDMA TX Scheduler Rate Control Register */
+#define MTK_QDMA_TX_SCH_MAX_WFQ BIT(15)
+
+/* QDMA Global Configuration Register */
+#define MTK_RX_2B_OFFSET BIT(31)
+#define MTK_RX_BT_32DWORDS (3 << 11)
+#define MTK_NDP_CO_PRO BIT(10)
+#define MTK_TX_WB_DDONE BIT(6)
+#define MTK_TX_BT_32DWORDS (3 << 4)
+#define MTK_RX_DMA_BUSY BIT(3)
+#define MTK_TX_DMA_BUSY BIT(1)
+#define MTK_RX_DMA_EN BIT(2)
+#define MTK_TX_DMA_EN BIT(0)
+#define MTK_DMA_BUSY_TIMEOUT_US 1000000
+
+/* QDMA V2 Global Configuration Register */
+#define MTK_CHK_DDONE_EN BIT(28)
+#define MTK_DMAD_WR_WDONE BIT(26)
+#define MTK_WCOMP_EN BIT(24)
+#define MTK_RESV_BUF (0x40 << 16)
+#define MTK_MUTLI_CNT (0x4 << 12)
+#define MTK_LEAKY_BUCKET_EN BIT(11)
+
+/* QDMA Flow Control Register */
+#define FC_THRES_DROP_MODE BIT(20)
+#define FC_THRES_DROP_EN (7 << 16)
+#define FC_THRES_MIN 0x4444
+
+/* QDMA Interrupt Status Register */
+#define MTK_RX_DONE_DLY BIT(30)
+#define MTK_TX_DONE_DLY BIT(28)
+#define MTK_RX_DONE_INT3 BIT(19)
+#define MTK_RX_DONE_INT2 BIT(18)
+#define MTK_RX_DONE_INT1 BIT(17)
+#define MTK_RX_DONE_INT0 BIT(16)
+#define MTK_TX_DONE_INT3 BIT(3)
+#define MTK_TX_DONE_INT2 BIT(2)
+#define MTK_TX_DONE_INT1 BIT(1)
+#define MTK_TX_DONE_INT0 BIT(0)
+#define MTK_RX_DONE_INT MTK_RX_DONE_DLY
+#define MTK_TX_DONE_INT MTK_TX_DONE_DLY
+
+#define MTK_RX_DONE_INT_V2 BIT(14)
+
+#define MTK_CDM_TXFIFO_RDY BIT(7)
+
+/* QDMA Interrupt grouping registers */
+#define MTK_RLS_DONE_INT BIT(0)
+
+/* QDMA TX NUM */
+#define QID_BITS_V2(x) (((x) & 0x3f) << 16)
+#define MTK_QDMA_GMAC2_QID 8
+
+#define MTK_TX_DMA_BUF_SHIFT 8
+
+/* QDMA V2 descriptor txd6 */
+#define TX_DMA_INS_VLAN_V2 BIT(16)
+/* QDMA V2 descriptor txd5 */
+#define TX_DMA_CHKSUM_V2 (0x7 << 28)
+#define TX_DMA_TSO_V2 BIT(31)
+
+#define TX_DMA_SPTAG_V3 BIT(27)
+
+/* QDMA V2 descriptor txd4 */
+#define TX_DMA_FPORT_SHIFT_V2 8
+#define TX_DMA_FPORT_MASK_V2 0xf
+#define TX_DMA_SWC_V2 BIT(30)
+
+/* QDMA descriptor txd4 */
+#define TX_DMA_CHKSUM (0x7 << 29)
+#define TX_DMA_TSO BIT(28)
+#define TX_DMA_FPORT_SHIFT 25
+#define TX_DMA_FPORT_MASK 0x7
+#define TX_DMA_INS_VLAN BIT(16)
+
+/* QDMA descriptor txd3 */
+#define TX_DMA_OWNER_CPU BIT(31)
+#define TX_DMA_LS0 BIT(30)
+#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len)
+#define TX_DMA_SWC BIT(14)
+#define TX_DMA_PQID GENMASK(3, 0)
+#define TX_DMA_ADDR64_MASK GENMASK(3, 0)
+#if IS_ENABLED(CONFIG_64BIT)
+# define TX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(TX_DMA_ADDR64_MASK, (x))) << 32)
+# define TX_DMA_PREP_ADDR64(x) FIELD_PREP(TX_DMA_ADDR64_MASK, ((x) >> 32))
+#else
+# define TX_DMA_GET_ADDR64(x) (0)
+# define TX_DMA_PREP_ADDR64(x) (0)
+#endif
+
+/* PDMA on MT7628 */
+#define TX_DMA_DONE BIT(31)
+#define TX_DMA_LS1 BIT(14)
+#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE)
+
+/* QDMA descriptor rxd2 */
+#define RX_DMA_DONE BIT(31)
+#define RX_DMA_LSO BIT(30)
+#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset)
+#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len)
+#define RX_DMA_VTAG BIT(15)
+#define RX_DMA_ADDR64_MASK GENMASK(3, 0)
+#if IS_ENABLED(CONFIG_64BIT)
+# define RX_DMA_GET_ADDR64(x) (((u64)FIELD_GET(RX_DMA_ADDR64_MASK, (x))) << 32)
+# define RX_DMA_PREP_ADDR64(x) FIELD_PREP(RX_DMA_ADDR64_MASK, ((x) >> 32))
+#else
+# define RX_DMA_GET_ADDR64(x) (0)
+# define RX_DMA_PREP_ADDR64(x) (0)
+#endif
+
+/* QDMA descriptor rxd3 */
+#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK)
+#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK))
+#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff)
+
+/* QDMA descriptor rxd4 */
+#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0)
+#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14)
+#define MTK_RXD4_SRC_PORT GENMASK(21, 19)
+#define MTK_RXD4_ALG GENMASK(31, 22)
+
+/* QDMA descriptor rxd4 */
+#define RX_DMA_L4_VALID BIT(24)
+#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */
+#define RX_DMA_SPECIAL_TAG BIT(22)
+
+/* PDMA descriptor rxd5 */
+#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0)
+#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18)
+#define MTK_RXD5_SRC_PORT GENMASK(29, 26)
+
+#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7)
+#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf)
+
+/* PDMA V2 descriptor rxd3 */
+#define RX_DMA_VTAG_V2 BIT(0)
+#define RX_DMA_L4_VALID_V2 BIT(2)
+
+/* PHY Polling and SMI Master Control registers */
+#define MTK_PPSC 0x10000
+#define PPSC_MDC_CFG GENMASK(29, 24)
+#define PPSC_MDC_TURBO BIT(20)
+#define MDC_MAX_FREQ 25000000
+#define MDC_MAX_DIVIDER 63
+
+/* PHY Indirect Access Control registers */
+#define MTK_PHY_IAC 0x10004
+#define PHY_IAC_ACCESS BIT(31)
+#define PHY_IAC_REG_MASK GENMASK(29, 25)
+#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x))
+#define PHY_IAC_ADDR_MASK GENMASK(24, 20)
+#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x))
+#define PHY_IAC_CMD_MASK GENMASK(19, 18)
+#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0)
+#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1)
+#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2)
+#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3)
+#define PHY_IAC_START_MASK GENMASK(17, 16)
+#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0)
+#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1)
+#define PHY_IAC_DATA_MASK GENMASK(15, 0)
+#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x))
+#define PHY_IAC_TIMEOUT HZ
+
+#define MTK_MAC_MISC 0x1000c
+#define MTK_MAC_MISC_V3 0x10010
+#define MTK_MUX_TO_ESW BIT(0)
+#define MISC_MDC_TURBO BIT(4)
+
+/* XMAC status registers */
+#define MTK_XGMAC_STS(x) (((x) == MTK_GMAC3_ID) ? 0x1001C : 0x1000C)
+#define MTK_XGMAC_FORCE_LINK(x) (((x) == MTK_GMAC2_ID) ? BIT(31) : BIT(15))
+#define MTK_USXGMII_PCS_LINK BIT(8)
+#define MTK_XGMAC_RX_FC BIT(5)
+#define MTK_XGMAC_TX_FC BIT(4)
+#define MTK_USXGMII_PCS_MODE GENMASK(3, 1)
+#define MTK_XGMAC_LINK_STS BIT(0)
+
+/* GSW bridge registers */
+#define MTK_GSW_CFG (0x10080)
+#define GSWTX_IPG_MASK GENMASK(19, 16)
+#define GSWTX_IPG_SHIFT 16
+#define GSWRX_IPG_MASK GENMASK(3, 0)
+#define GSWRX_IPG_SHIFT 0
+#define GSW_IPG_11 11
+
+/* Mac control registers */
+#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100))
+#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24)
+#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24))
+#define MAC_MCR_MAX_RX_1518 0x0
+#define MAC_MCR_MAX_RX_1536 0x1
+#define MAC_MCR_MAX_RX_1552 0x2
+#define MAC_MCR_MAX_RX_2048 0x3
+#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16))
+#define MAC_MCR_FORCE_MODE BIT(15)
+#define MAC_MCR_TX_EN BIT(14)
+#define MAC_MCR_RX_EN BIT(13)
+#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12)
+#define MAC_MCR_BACKOFF_EN BIT(9)
+#define MAC_MCR_BACKPR_EN BIT(8)
+#define MAC_MCR_FORCE_RX_FC BIT(5)
+#define MAC_MCR_FORCE_TX_FC BIT(4)
+#define MAC_MCR_SPEED_1000 BIT(3)
+#define MAC_MCR_SPEED_100 BIT(2)
+#define MAC_MCR_FORCE_DPX BIT(1)
+#define MAC_MCR_FORCE_LINK BIT(0)
+#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE)
+
+/* Mac status registers */
+#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100))
+#define MAC_MSR_EEE1G BIT(7)
+#define MAC_MSR_EEE100M BIT(6)
+#define MAC_MSR_RX_FC BIT(5)
+#define MAC_MSR_TX_FC BIT(4)
+#define MAC_MSR_SPEED_1000 BIT(3)
+#define MAC_MSR_SPEED_100 BIT(2)
+#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)
+#define MAC_MSR_DPX BIT(1)
+#define MAC_MSR_LINK BIT(0)
+
+/* TRGMII RXC control register */
+#define TRGMII_RCK_CTRL 0x10300
+#define DQSI0(x) ((x << 0) & GENMASK(6, 0))
+#define DQSI1(x) ((x << 8) & GENMASK(14, 8))
+#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define RXC_RST BIT(31)
+#define RXC_DQSISEL BIT(30)
+#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16))
+#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2)
+
+#define NUM_TRGMII_CTRL 5
+
+/* TRGMII TXC control register */
+#define TRGMII_TCK_CTRL 0x10340
+#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16))
+#define TXC_INV BIT(30)
+#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2)
+#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2))
+
+/* TRGMII TX Drive Strength */
+#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i))
+#define TD_DM_DRVP(x) ((x) & 0xf)
+#define TD_DM_DRVN(x) (((x) & 0xf) << 4)
+
+/* TRGMII Interface mode register */
+#define INTF_MODE 0x10390
+#define TRGMII_INTF_DIS BIT(0)
+#define TRGMII_MODE BIT(1)
+#define TRGMII_CENTRAL_ALIGNED BIT(2)
+#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED)
+#define INTF_MODE_RGMII_10_100 0
+
+/* GPIO port control registers for GMAC 2 */
+#define GPIO_OD33_CTRL8 0x4c0
+#define GPIO_BIAS_CTRL 0xed0
+#define GPIO_DRV_SEL10 0xf00
+
+/* ethernet subsystem chip id register */
+#define ETHSYS_CHIPID0_3 0x0
+#define ETHSYS_CHIPID4_7 0x4
+#define MT7623_ETH 7623
+#define MT7622_ETH 7622
+#define MT7621_ETH 7621
+
+/* ethernet system control register */
+#define ETHSYS_SYSCFG 0x10
+#define SYSCFG_DRAM_TYPE_DDR2 BIT(4)
+
+/* ethernet subsystem config register */
+#define ETHSYS_SYSCFG0 0x14
+#define SYSCFG0_GE_MASK 0x3
+#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2)))
+#define SYSCFG0_SGMII_MASK GENMASK(9, 7)
+#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK)
+#define SYSCFG0_SGMII_GMAC1_V2 BIT(9)
+#define SYSCFG0_SGMII_GMAC2_V2 BIT(8)
+
+
+/* ethernet subsystem clock register */
+#define ETHSYS_CLKCFG0 0x2c
+#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11)
+#define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6))
+#define ETHSYS_TRGMII_MT7621_APLL BIT(6)
+#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5)
+
+/* ethernet reset control register */
+#define ETHSYS_RSTCTRL 0x34
+#define RSTCTRL_FE BIT(6)
+#define RSTCTRL_WDMA0 BIT(24)
+#define RSTCTRL_WDMA1 BIT(25)
+#define RSTCTRL_WDMA2 BIT(26)
+#define RSTCTRL_PPE0 BIT(31)
+#define RSTCTRL_PPE0_V2 BIT(30)
+#define RSTCTRL_PPE1 BIT(31)
+#define RSTCTRL_PPE0_V3 BIT(29)
+#define RSTCTRL_PPE1_V3 BIT(30)
+#define RSTCTRL_PPE2 BIT(31)
+#define RSTCTRL_ETH BIT(23)
+
+/* ethernet reset check idle register */
+#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28
+
+/* ethernet dma channel agent map */
+#define ETHSYS_DMA_AG_MAP 0x408
+#define ETHSYS_DMA_AG_MAP_PDMA BIT(0)
+#define ETHSYS_DMA_AG_MAP_QDMA BIT(1)
+#define ETHSYS_DMA_AG_MAP_PPE BIT(2)
+
+/* Infrasys subsystem config registers */
+#define INFRA_MISC2 0x70c
+#define CO_QPHY_SEL BIT(0)
+#define GEPHY_MAC_SEL BIT(1)
+
+/* Top misc registers */
+#define USB_PHY_SWITCH_REG 0x218
+#define QPHY_SEL_MASK GENMASK(1, 0)
+#define SGMII_QPHY_SEL 0x2
+
+/* MT7628/88 specific stuff */
+#define MT7628_PDMA_OFFSET 0x0800
+#define MT7628_SDM_OFFSET 0x0c00
+
+#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00)
+#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04)
+#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08)
+#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c)
+#define MT7628_PST_DTX_IDX0 BIT(0)
+
+#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c)
+#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10)
+
+/* Counter / stat register */
+#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100)
+#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104)
+#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108)
+#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c)
+#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110)
+
+#define MTK_FE_CDM1_FSM 0x220
+#define MTK_FE_CDM2_FSM 0x224
+#define MTK_FE_CDM3_FSM 0x238
+#define MTK_FE_CDM4_FSM 0x298
+#define MTK_FE_CDM5_FSM 0x318
+#define MTK_FE_CDM6_FSM 0x328
+#define MTK_FE_GDM1_FSM 0x228
+#define MTK_FE_GDM2_FSM 0x22C
+
+#define MTK_MAC_FSM(x) (0x1010C + ((x) * 0x100))
+
+struct mtk_rx_dma {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+} __packed __aligned(4);
+
+struct mtk_rx_dma_v2 {
+ unsigned int rxd1;
+ unsigned int rxd2;
+ unsigned int rxd3;
+ unsigned int rxd4;
+ unsigned int rxd5;
+ unsigned int rxd6;
+ unsigned int rxd7;
+ unsigned int rxd8;
+} __packed __aligned(4);
+
+struct mtk_tx_dma {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+} __packed __aligned(4);
+
+struct mtk_tx_dma_v2 {
+ unsigned int txd1;
+ unsigned int txd2;
+ unsigned int txd3;
+ unsigned int txd4;
+ unsigned int txd5;
+ unsigned int txd6;
+ unsigned int txd7;
+ unsigned int txd8;
+} __packed __aligned(4);
+
+struct mtk_eth;
+struct mtk_mac;
+
+struct mtk_xdp_stats {
+ u64 rx_xdp_redirect;
+ u64 rx_xdp_pass;
+ u64 rx_xdp_drop;
+ u64 rx_xdp_tx;
+ u64 rx_xdp_tx_errors;
+ u64 tx_xdp_xmit;
+ u64 tx_xdp_xmit_errors;
+};
+
+/* struct mtk_hw_stats - the structure that holds the traffic statistics.
+ * @stats_lock: make sure that stats operations are atomic
+ * @reg_offset: the status register offset of the SoC
+ * @syncp:		synchronization point for 64-bit statistics reads
+ *
+ * All of the supported SoCs have hardware counters for traffic statistics.
+ * Whenever the status IRQ triggers we can read the latest stats from these
+ * counters and store them in this struct.
+ */
+struct mtk_hw_stats {
+ u64 tx_bytes;
+ u64 tx_packets;
+ u64 tx_skip;
+ u64 tx_collisions;
+ u64 rx_bytes;
+ u64 rx_packets;
+ u64 rx_overflow;
+ u64 rx_fcs_errors;
+ u64 rx_short_errors;
+ u64 rx_long_errors;
+ u64 rx_checksum_errors;
+ u64 rx_flow_control_packets;
+
+ struct mtk_xdp_stats xdp_stats;
+
+ spinlock_t stats_lock;
+ u32 reg_offset;
+ struct u64_stats_sync syncp;
+};
+
+enum mtk_tx_flags {
+ /* PDMA descriptor can point at 1-2 segments. This enum allows us to
+ * track how memory was allocated so that it can be freed properly.
+ */
+ MTK_TX_FLAGS_SINGLE0 = 0x01,
+ MTK_TX_FLAGS_PAGE0 = 0x02,
+};
+
+/* This enum identifies the clocks in the order in which they appear in the
+ * clock array.
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_SGMIITOP,
+ MTK_CLK_ESW,
+ MTK_CLK_GP0,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_GP3,
+ MTK_CLK_XGP1,
+ MTK_CLK_XGP2,
+ MTK_CLK_XGP3,
+ MTK_CLK_CRYPTO,
+ MTK_CLK_FE,
+ MTK_CLK_TRGPLL,
+ MTK_CLK_SGMII_TX_250M,
+ MTK_CLK_SGMII_RX_250M,
+ MTK_CLK_SGMII_CDR_REF,
+ MTK_CLK_SGMII_CDR_FB,
+ MTK_CLK_SGMII2_TX_250M,
+ MTK_CLK_SGMII2_RX_250M,
+ MTK_CLK_SGMII2_CDR_REF,
+ MTK_CLK_SGMII2_CDR_FB,
+ MTK_CLK_SGMII_CK,
+ MTK_CLK_ETH2PLL,
+ MTK_CLK_WOCPU0,
+ MTK_CLK_WOCPU1,
+ MTK_CLK_NETSYS0,
+ MTK_CLK_NETSYS1,
+ MTK_CLK_ETHWARP_WOCPU2,
+ MTK_CLK_ETHWARP_WOCPU1,
+ MTK_CLK_ETHWARP_WOCPU0,
+ MTK_CLK_TOP_USXGMII_SBUS_0_SEL,
+ MTK_CLK_TOP_USXGMII_SBUS_1_SEL,
+ MTK_CLK_TOP_SGM_0_SEL,
+ MTK_CLK_TOP_SGM_1_SEL,
+ MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL,
+ MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL,
+ MTK_CLK_TOP_ETH_GMII_SEL,
+ MTK_CLK_TOP_ETH_REFCK_50M_SEL,
+ MTK_CLK_TOP_ETH_SYS_200M_SEL,
+ MTK_CLK_TOP_ETH_SYS_SEL,
+ MTK_CLK_TOP_ETH_XGMII_SEL,
+ MTK_CLK_TOP_ETH_MII_SEL,
+ MTK_CLK_TOP_NETSYS_SEL,
+ MTK_CLK_TOP_NETSYS_500M_SEL,
+ MTK_CLK_TOP_NETSYS_PAO_2X_SEL,
+ MTK_CLK_TOP_NETSYS_SYNC_250M_SEL,
+ MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL,
+ MTK_CLK_TOP_NETSYS_WARP_SEL,
+ MTK_CLK_MAX
+};
+
+#define MT7623_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_TRGPLL))
+#define MT7622_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL))
+#define MT7621_CLKS_BITMAP (0)
+#define MT7628_CLKS_BITMAP (0)
+#define MT7629_CLKS_BITMAP (BIT_ULL(MTK_CLK_ETHIF) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP0) | BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_GP2) | BIT_ULL(MTK_CLK_FE) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK) | \
+ BIT_ULL(MTK_CLK_ETH2PLL) | BIT_ULL(MTK_CLK_SGMIITOP))
+#define MT7981_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII_CK))
+#define MT7986_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP1) | \
+ BIT_ULL(MTK_CLK_WOCPU1) | BIT_ULL(MTK_CLK_WOCPU0) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII_CDR_FB) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_REF) | \
+ BIT_ULL(MTK_CLK_SGMII2_CDR_FB))
+#define MT7988_CLKS_BITMAP (BIT_ULL(MTK_CLK_FE) | BIT_ULL(MTK_CLK_ESW) | \
+ BIT_ULL(MTK_CLK_GP1) | BIT_ULL(MTK_CLK_GP2) | \
+ BIT_ULL(MTK_CLK_GP3) | BIT_ULL(MTK_CLK_XGP1) | \
+ BIT_ULL(MTK_CLK_XGP2) | BIT_ULL(MTK_CLK_XGP3) | \
+ BIT_ULL(MTK_CLK_CRYPTO) | \
+ BIT_ULL(MTK_CLK_SGMII_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII_RX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_TX_250M) | \
+ BIT_ULL(MTK_CLK_SGMII2_RX_250M) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU2) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU1) | \
+ BIT_ULL(MTK_CLK_ETHWARP_WOCPU0) | \
+ BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_0_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_USXGMII_SBUS_1_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_SGM_0_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_SGM_1_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_XFI_PHY_0_XTAL_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_XFI_PHY_1_XTAL_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_GMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_REFCK_50M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_200M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_SYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_XGMII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_ETH_MII_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_500M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PAO_2X_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_SYNC_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_PPEFB_250M_SEL) | \
+ BIT_ULL(MTK_CLK_TOP_NETSYS_WARP_SEL))
+
+enum mtk_dev_state {
+ MTK_HW_INIT,
+ MTK_RESETTING
+};
+
+/* PSE Port Definition */
+enum mtk_pse_port {
+ PSE_ADMA_PORT = 0,
+ PSE_GDM1_PORT,
+ PSE_GDM2_PORT,
+ PSE_PPE0_PORT,
+ PSE_PPE1_PORT,
+ PSE_QDMA_TX_PORT,
+ PSE_QDMA_RX_PORT,
+ PSE_DROP_PORT,
+ PSE_WDMA0_PORT,
+ PSE_WDMA1_PORT,
+ PSE_TDMA_PORT,
+ PSE_NONE_PORT,
+ PSE_PPE2_PORT,
+ PSE_WDMA2_PORT,
+ PSE_EIP197_PORT,
+ PSE_GDM3_PORT,
+ PSE_PORT_MAX
+};
+
+/* GMAC Identifier */
+enum mtk_gmac_id {
+ MTK_GMAC1_ID = 0,
+ MTK_GMAC2_ID,
+ MTK_GMAC3_ID,
+ MTK_GMAC_ID_MAX
+};
+
+enum mtk_tx_buf_type {
+ MTK_TYPE_SKB,
+ MTK_TYPE_XDP_TX,
+ MTK_TYPE_XDP_NDO,
+};
+
+/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
+ * by the TX descriptors
+ * @skb: The SKB pointer of the packet being sent
+ * @dma_addr0: The base addr of the first segment
+ * @dma_len0: The length of the first segment
+ * @dma_addr1: The base addr of the second segment
+ * @dma_len1: The length of the second segment
+ */
+struct mtk_tx_buf {
+ enum mtk_tx_buf_type type;
+ void *data;
+
+ u16 mac_id;
+ u16 flags;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr0);
+ DEFINE_DMA_UNMAP_LEN(dma_len0);
+ DEFINE_DMA_UNMAP_ADDR(dma_addr1);
+ DEFINE_DMA_UNMAP_LEN(dma_len1);
+};
+
+/* struct mtk_tx_ring - This struct holds info describing a TX ring
+ * @dma: The descriptor ring
+ * @buf: The memory pointed at by the ring
+ * @phys: The physical addr of tx_buf
+ * @next_free: Pointer to the next free descriptor
+ * @last_free: Pointer to the last free descriptor
+ * @last_free_ptr: Hardware pointer value of the last free descriptor
+ * @thresh: The threshold for the minimum number of free descriptors
+ * @free_count: QDMA uses a linked list. Track how many free descriptors
+ * are present
+ */
+struct mtk_tx_ring {
+ void *dma;
+ struct mtk_tx_buf *buf;
+ dma_addr_t phys;
+ struct mtk_tx_dma *next_free;
+ struct mtk_tx_dma *last_free;
+ u32 last_free_ptr;
+ u16 thresh;
+ atomic_t free_count;
+ int dma_size;
+ struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */
+ dma_addr_t phys_pdma;
+ int cpu_idx;
+};
+
+/* PDMA rx ring mode */
+enum mtk_rx_flags {
+ MTK_RX_FLAGS_NORMAL = 0,
+ MTK_RX_FLAGS_HWLRO,
+ MTK_RX_FLAGS_QDMA,
+};
+
+/* struct mtk_rx_ring - This struct holds info describing a RX ring
+ * @dma: The descriptor ring
+ * @data: The memory pointed at by the ring
+ * @phys: The physical addr of rx_buf
+ * @frag_size: How big can each fragment be
+ * @buf_size: The size of each packet buffer
+ * @calc_idx: The current head of ring
+ */
+struct mtk_rx_ring {
+ void *dma;
+ u8 **data;
+ dma_addr_t phys;
+ u16 frag_size;
+ u16 buf_size;
+ u16 dma_size;
+ bool calc_idx_update;
+ u16 calc_idx;
+ u32 crx_idx_reg;
+ /* page_pool */
+ struct page_pool *page_pool;
+ struct xdp_rxq_info xdp_q;
+};
+
+enum mkt_eth_capabilities {
+ MTK_RGMII_BIT = 0,
+ MTK_TRGMII_BIT,
+ MTK_SGMII_BIT,
+ MTK_ESW_BIT,
+ MTK_GEPHY_BIT,
+ MTK_MUX_BIT,
+ MTK_INFRA_BIT,
+ MTK_SHARED_SGMII_BIT,
+ MTK_HWLRO_BIT,
+ MTK_SHARED_INT_BIT,
+ MTK_TRGMII_MT7621_CLK_BIT,
+ MTK_QDMA_BIT,
+ MTK_SOC_MT7628_BIT,
+ MTK_RSTCTRL_PPE1_BIT,
+ MTK_RSTCTRL_PPE2_BIT,
+ MTK_U3_COPHY_V2_BIT,
+ MTK_SRAM_BIT,
+ MTK_36BIT_DMA_BIT,
+
+ /* MUX BITS */
+ MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT,
+ MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT,
+ MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT,
+ MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT,
+ MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT,
+
+ /* PATH BITS */
+ MTK_ETH_PATH_GMAC1_RGMII_BIT,
+ MTK_ETH_PATH_GMAC1_TRGMII_BIT,
+ MTK_ETH_PATH_GMAC1_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_RGMII_BIT,
+ MTK_ETH_PATH_GMAC2_SGMII_BIT,
+ MTK_ETH_PATH_GMAC2_GEPHY_BIT,
+ MTK_ETH_PATH_GDM1_ESW_BIT,
+};
+
+/* Supported hardware group on SoCs */
+#define MTK_RGMII BIT_ULL(MTK_RGMII_BIT)
+#define MTK_TRGMII BIT_ULL(MTK_TRGMII_BIT)
+#define MTK_SGMII BIT_ULL(MTK_SGMII_BIT)
+#define MTK_ESW BIT_ULL(MTK_ESW_BIT)
+#define MTK_GEPHY BIT_ULL(MTK_GEPHY_BIT)
+#define MTK_MUX BIT_ULL(MTK_MUX_BIT)
+#define MTK_INFRA BIT_ULL(MTK_INFRA_BIT)
+#define MTK_SHARED_SGMII BIT_ULL(MTK_SHARED_SGMII_BIT)
+#define MTK_HWLRO BIT_ULL(MTK_HWLRO_BIT)
+#define MTK_SHARED_INT BIT_ULL(MTK_SHARED_INT_BIT)
+#define MTK_TRGMII_MT7621_CLK BIT_ULL(MTK_TRGMII_MT7621_CLK_BIT)
+#define MTK_QDMA BIT_ULL(MTK_QDMA_BIT)
+#define MTK_SOC_MT7628 BIT_ULL(MTK_SOC_MT7628_BIT)
+#define MTK_RSTCTRL_PPE1 BIT_ULL(MTK_RSTCTRL_PPE1_BIT)
+#define MTK_RSTCTRL_PPE2 BIT_ULL(MTK_RSTCTRL_PPE2_BIT)
+#define MTK_U3_COPHY_V2 BIT_ULL(MTK_U3_COPHY_V2_BIT)
+#define MTK_SRAM BIT_ULL(MTK_SRAM_BIT)
+#define MTK_36BIT_DMA BIT_ULL(MTK_36BIT_DMA_BIT)
+
+#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \
+ BIT_ULL(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT)
+#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \
+ BIT_ULL(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT)
+#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \
+ BIT_ULL(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT)
+#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ BIT_ULL(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT)
+#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \
+ BIT_ULL(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT)
+
+/* Supported path present on SoCs */
+#define MTK_ETH_PATH_GMAC1_RGMII BIT_ULL(MTK_ETH_PATH_GMAC1_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_TRGMII BIT_ULL(MTK_ETH_PATH_GMAC1_TRGMII_BIT)
+#define MTK_ETH_PATH_GMAC1_SGMII BIT_ULL(MTK_ETH_PATH_GMAC1_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_RGMII BIT_ULL(MTK_ETH_PATH_GMAC2_RGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_SGMII BIT_ULL(MTK_ETH_PATH_GMAC2_SGMII_BIT)
+#define MTK_ETH_PATH_GMAC2_GEPHY BIT_ULL(MTK_ETH_PATH_GMAC2_GEPHY_BIT)
+#define MTK_ETH_PATH_GDM1_ESW BIT_ULL(MTK_ETH_PATH_GDM1_ESW_BIT)
+
+#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII)
+#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII)
+#define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII)
+#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII)
+#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII)
+#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY)
+#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW)
+
+/* MUXes present on SoCs */
+/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */
+#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX)
+
+/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */
+#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \
+ (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA)
+
+/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */
+#define MTK_MUX_U3_GMAC2_TO_QPHY \
+ (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA)
+
+/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */
+#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \
+ (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \
+ MTK_SHARED_SGMII)
+
+/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */
+#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \
+ (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX)
+
+#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x))
+
+#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \
+ MTK_GMAC2_RGMII | MTK_SHARED_INT | \
+ MTK_TRGMII_MT7621_CLK | MTK_QDMA)
+
+#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \
+ MTK_GMAC2_SGMII | MTK_GDM1_ESW | \
+ MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA)
+
+#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \
+ MTK_QDMA)
+
+#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628)
+
+#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \
+ MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA)
+
+#define MT7981_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_MUX_U3_GMAC2_TO_QPHY | MTK_U3_COPHY_V2 | \
+ MTK_RSTCTRL_PPE1 | MTK_SRAM)
+
+#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \
+ MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \
+ MTK_RSTCTRL_PPE1 | MTK_SRAM)
+
+#define MT7988_CAPS (MTK_36BIT_DMA | MTK_GDM1_ESW | MTK_QDMA | \
+ MTK_RSTCTRL_PPE1 | MTK_RSTCTRL_PPE2 | MTK_SRAM)
+
+struct mtk_tx_dma_desc_info {
+ dma_addr_t addr;
+ u32 size;
+ u16 vlan_tci;
+ u16 qid;
+ u8 gso:1;
+ u8 csum:1;
+ u8 vlan:1;
+ u8 first:1;
+ u8 last:1;
+};
+
+struct mtk_reg_map {
+ u32 tx_irq_mask;
+ u32 tx_irq_status;
+ struct {
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 pcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 irq_status; /* interrupt status */
+ u32 irq_mask; /* interrupt mask */
+ u32 adma_rx_dbg0;
+ u32 int_grp;
+ } pdma;
+ struct {
+ u32 qtx_cfg; /* tx queue configuration */
+ u32 qtx_sch; /* tx queue scheduler configuration */
+ u32 rx_ptr; /* rx base pointer */
+ u32 rx_cnt_cfg; /* rx max count configuration */
+ u32 qcrx_ptr; /* rx cpu pointer */
+ u32 glo_cfg; /* global configuration */
+ u32 rst_idx; /* reset index */
+ u32 delay_irq; /* delay interrupt */
+ u32 fc_th; /* flow control */
+ u32 int_grp;
+ u32 hred; /* interrupt mask */
+ u32 ctx_ptr; /* tx acquire cpu pointer */
+ u32 dtx_ptr; /* tx acquire dma pointer */
+ u32 crx_ptr; /* tx release cpu pointer */
+ u32 drx_ptr; /* tx release dma pointer */
+ u32 fq_head; /* fq head pointer */
+ u32 fq_tail; /* fq tail pointer */
+ u32 fq_count; /* fq free page count */
+ u32 fq_blen; /* fq free page buffer length */
+ u32 tx_sch_rate; /* tx scheduler rate control registers */
+ } qdma;
+ u32 gdm1_cnt;
+ u32 gdma_to_ppe;
+ u32 ppe_base;
+ u32 wdma_base[2];
+ u32 pse_iq_sta;
+ u32 pse_oq_sta;
+};
+
+/* struct mtk_soc_data - This is the structure holding all differences
+ * among various platforms
+ * @reg_map SoC register map.
+ * @ana_rgc3: The offset for register ANA_RGC3 related to
+ * sgmiisys syscon
+ * @caps Flags showing the extra capabilities of the SoC
+ * @hw_features Flags showing HW features
+ * @required_clks Bitmap of the clocks required on
+ * the target SoC
+ * @required_pctl A bool value to show whether the SoC requires
+ * the extra setup for those pins used by GMAC.
+ * @hash_offset Flow table hash offset.
+ * @version SoC version.
+ * @foe_entry_size Foe table entry size.
+ * @has_accounting Bool indicating support for accounting of
+ * offloaded flows.
+ * @txd_size Tx DMA descriptor size.
+ * @rxd_size Rx DMA descriptor size.
+ * @rx_irq_done_mask Rx irq done register mask.
+ * @rx_dma_l4_valid Rx DMA valid register mask.
+ * @dma_max_len Max DMA tx/rx buffer length.
+ * @dma_len_offset Tx/Rx DMA length field offset.
+ */
+struct mtk_soc_data {
+ const struct mtk_reg_map *reg_map;
+ u32 ana_rgc3;
+ u64 caps;
+ u64 required_clks;
+ bool required_pctl;
+ u8 offload_version;
+ u8 hash_offset;
+ u8 version;
+ u16 foe_entry_size;
+ netdev_features_t hw_features;
+ bool has_accounting;
+ bool disable_pll_modes;
+ struct {
+ u32 txd_size;
+ u32 rxd_size;
+ u32 rx_irq_done_mask;
+ u32 rx_dma_l4_valid;
+ u32 dma_max_len;
+ u32 dma_len_offset;
+ } txrx;
+};
+
+#define MTK_DMA_MONITOR_TIMEOUT msecs_to_jiffies(1000)
+
+/* currently no SoC has more than 3 macs */
+#define MTK_MAX_DEVS 3
+
+/* struct mtk_eth - This is the main data structure for holding the state
+ * of the driver
+ * @dev: The device pointer
+ * @dma_dev: The device pointer used for dma mapping/alloc
+ * @base: The mapped register i/o base
+ * @page_lock: Make sure that register operations are atomic
+ * @tx_irq_lock: Make sure that IRQ register operations are atomic
+ * @rx_irq_lock: Make sure that IRQ register operations are atomic
+ * @dim_lock: Make sure that Net DIM operations are atomic
+ * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a
+ * dummy for NAPI to work
+ * @netdev: The netdev instances
+ * @mac: Each netdev is linked to a physical MAC
+ * @irq: The IRQ that we are using
+ * @msg_enable: Ethtool msg level
+ * @ethsys: The register map pointing at the range used to setup
+ * MII modes
+ * @infra: The register map pointing at the range used to setup
+ * SGMII and GePHY path
+ * @sgmii_pcs: Pointers to mtk-pcs-lynxi phylink_pcs instances
+ * @pctl: The register map pointing at the range used to setup
+ * GMAC port drive/slew values
+ * @dma_refcnt: track how many netdevs are using the DMA engine
+ * @tx_ring: Pointer to the memory holding info about the TX ring
+ * @rx_ring: Pointer to the memory holding info about the RX ring
+ * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring
+ * @tx_napi: The TX NAPI struct
+ * @rx_napi: The RX NAPI struct
+ * @rx_events: Net DIM RX event counter
+ * @rx_packets: Net DIM RX packet counter
+ * @rx_bytes: Net DIM RX byte counter
+ * @rx_dim: Net DIM RX context
+ * @tx_events: Net DIM TX event counter
+ * @tx_packets: Net DIM TX packet counter
+ * @tx_bytes: Net DIM TX byte counter
+ * @tx_dim: Net DIM TX context
+ * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
+ * @phy_scratch_ring: physical address of scratch_ring
+ * @scratch_head: The scratch memory that scratch_ring points to.
+ * @clks: clock array for all clocks required
+ * @mii_bus: If there is a bus we need to create an instance for it
+ * @pending_work: The workqueue used to reset the dma ring
+ * @state: Initialization and runtime state of the device
+ * @soc: Holding specific data among various SoCs
+ */
+
+struct mtk_eth {
+ struct device *dev;
+ struct device *dma_dev;
+ void __iomem *base;
+ void *sram_base;
+ spinlock_t page_lock;
+ spinlock_t tx_irq_lock;
+ spinlock_t rx_irq_lock;
+ struct net_device dummy_dev;
+ struct net_device *netdev[MTK_MAX_DEVS];
+ struct mtk_mac *mac[MTK_MAX_DEVS];
+ int irq[3];
+ u32 msg_enable;
+ unsigned long sysclk;
+ struct regmap *ethsys;
+ struct regmap *infra;
+ struct phylink_pcs *sgmii_pcs[MTK_MAX_DEVS];
+ struct regmap *pctl;
+ bool hwlro;
+ refcount_t dma_refcnt;
+ struct mtk_tx_ring tx_ring;
+ struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM];
+ struct mtk_rx_ring rx_ring_qdma;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
+ void *scratch_ring;
+ dma_addr_t phy_scratch_ring;
+ void *scratch_head;
+ struct clk *clks[MTK_CLK_MAX];
+
+ struct mii_bus *mii_bus;
+ struct work_struct pending_work;
+ unsigned long state;
+
+ const struct mtk_soc_data *soc;
+
+ spinlock_t dim_lock;
+
+ u32 rx_events;
+ u32 rx_packets;
+ u32 rx_bytes;
+ struct dim rx_dim;
+
+ u32 tx_events;
+ u32 tx_packets;
+ u32 tx_bytes;
+ struct dim tx_dim;
+
+ int ip_align;
+
+ struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS];
+
+ struct mtk_ppe *ppe[2];
+ struct rhashtable flow_table;
+
+ struct bpf_prog __rcu *prog;
+
+ struct {
+ struct delayed_work monitor_work;
+ u32 wdidx;
+ u8 wdma_hang_count;
+ u8 qdma_hang_count;
+ u8 adma_hang_count;
+ } reset;
+};
+
+/* struct mtk_mac - the structure that holds the info about the MACs of the
+ * SoC
+ * @id: The number of the MAC
+ * @interface: Interface mode kept for detecting change in hw settings
+ * @of_node: Our devicetree node
+ * @hw: Backpointer to our main data structure
+ * @hw_stats: Packet statistics counter
+ */
+struct mtk_mac {
+ int id;
+ phy_interface_t interface;
+ int speed;
+ struct device_node *of_node;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+ struct mtk_eth *hw;
+ struct mtk_hw_stats *hw_stats;
+ __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT];
+ int hwlro_ip_cnt;
+ unsigned int syscfg0;
+ struct notifier_block device_notifier;
+};
+
+/* the struct describing the SoC. these are declared in the soc_xyz.c files */
+extern const struct of_device_id of_mtk_match[];
+
+static inline bool mtk_is_netsys_v1(struct mtk_eth *eth)
+{
+ return eth->soc->version == 1;
+}
+
+static inline bool mtk_is_netsys_v2_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 1;
+}
+
+static inline bool mtk_is_netsys_v3_or_greater(struct mtk_eth *eth)
+{
+ return eth->soc->version > 2;
+}
+
+static inline struct mtk_foe_entry *
+mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+
+ return ppe->foe_table + hash * soc->foe_entry_size;
+}
+
+static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+
+ return MTK_FOE_IB1_BIND_TIMESTAMP;
+}
+
+static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_PPPOE_V2;
+
+ return MTK_FOE_IB1_BIND_PPPOE;
+}
+
+static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_VLAN_TAG_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_TAG;
+}
+
+static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_BIND_VLAN_LAYER_V2;
+
+ return MTK_FOE_IB1_BIND_VLAN_LAYER;
+}
+
+static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val);
+}
+
+static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB1_PACKET_TYPE_V2;
+
+ return MTK_FOE_IB1_PACKET_TYPE;
+}
+
+static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val);
+
+ return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val);
+}
+
+static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth)
+{
+ if (mtk_is_netsys_v2_or_greater(eth))
+ return MTK_FOE_IB2_MULTICAST_V2;
+
+ return MTK_FOE_IB2_MULTICAST;
+}
+
+/* read the hardware status register */
+void mtk_stats_update_mac(struct mtk_mac *mac);
+
+void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg);
+u32 mtk_r32(struct mtk_eth *eth, unsigned reg);
+u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned int reg);
+
+int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id);
+int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id);
+
+int mtk_eth_offload_init(struct mtk_eth *eth);
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data);
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index);
+void mtk_flow_offload_cleanup(struct mtk_eth *eth, struct list_head *list);
+void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev);
+
+
+#endif /* MTK_ETH_H */
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c
new file mode 100644
index 0000000000..86f32f4860
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -0,0 +1,1104 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <net/dst_metadata.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
+#include "mtk_ppe.h"
+#include "mtk_ppe_regs.h"
+
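+/* ppe_lock protects the software flow lists and hardware FOE entry updates */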
+static DEFINE_SPINLOCK(ppe_lock);
+
+static const struct rhashtable_params mtk_flow_l2_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, l2_node),
+ .key_offset = offsetof(struct mtk_flow_entry, data.bridge),
+ .key_len = offsetof(struct mtk_foe_bridge, key_end),
+ .automatic_shrinking = true,
+};
+
+static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val)
+{
+ writel(val, ppe->base + reg);
+}
+
+static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg)
+{
+ return readl(ppe->base + reg);
+}
+
+static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set)
+{
+ u32 val;
+
+ val = ppe_r32(ppe, reg);
+ val &= ~mask;
+ val |= set;
+ ppe_w32(ppe, reg, val);
+
+ return val;
+}
+
+static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val)
+{
+ return ppe_m32(ppe, reg, 0, val);
+}
+
+static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val)
+{
+ return ppe_m32(ppe, reg, val, 0);
+}
+
+static u32 mtk_eth_timestamp(struct mtk_eth *eth)
+{
+ return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth);
+}
+
+static int mtk_ppe_wait_busy(struct mtk_ppe *ppe)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val,
+ !(val & MTK_PPE_GLO_CFG_BUSY),
+ 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+ if (ret)
+ dev_err(ppe->dev, "PPE table busy");
+
+ return ret;
+}
+
+static int mtk_ppe_mib_wait_busy(struct mtk_ppe *ppe)
+{
+ int ret;
+ u32 val;
+
+ ret = readl_poll_timeout(ppe->base + MTK_PPE_MIB_SER_CR, val,
+ !(val & MTK_PPE_MIB_SER_CR_ST),
+ 20, MTK_PPE_WAIT_TIMEOUT_US);
+
+ if (ret)
+ dev_err(ppe->dev, "MIB table busy");
+
+ return ret;
+}
+
+static int mtk_mib_entry_read(struct mtk_ppe *ppe, u16 index, u64 *bytes, u64 *packets)
+{
+ u32 val, cnt_r0, cnt_r1, cnt_r2;
+ int ret;
+
+ val = FIELD_PREP(MTK_PPE_MIB_SER_CR_ADDR, index) | MTK_PPE_MIB_SER_CR_ST;
+ ppe_w32(ppe, MTK_PPE_MIB_SER_CR, val);
+
+ ret = mtk_ppe_mib_wait_busy(ppe);
+ if (ret)
+ return ret;
+
+ cnt_r0 = readl(ppe->base + MTK_PPE_MIB_SER_R0);
+ cnt_r1 = readl(ppe->base + MTK_PPE_MIB_SER_R1);
+ cnt_r2 = readl(ppe->base + MTK_PPE_MIB_SER_R2);
+
+ if (mtk_is_netsys_v3_or_greater(ppe->eth)) {
+ /* 64 bit for each counter */
+ u32 cnt_r3 = readl(ppe->base + MTK_PPE_MIB_SER_R3);
+ *bytes = ((u64)cnt_r1 << 32) | cnt_r0;
+ *packets = ((u64)cnt_r3 << 32) | cnt_r2;
+ } else {
+ /* 48 bit byte counter, 40 bit packet counter */
+ u32 byte_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW, cnt_r0);
+ u32 byte_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH, cnt_r1);
+ u32 pkt_cnt_low = FIELD_GET(MTK_PPE_MIB_SER_R1_PKT_CNT_LOW, cnt_r1);
+ u32 pkt_cnt_high = FIELD_GET(MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH, cnt_r2);
+ *bytes = ((u64)byte_cnt_high << 32) | byte_cnt_low;
+ *packets = ((u64)pkt_cnt_high << 16) | pkt_cnt_low;
+ }
+
+ return 0;
+}
+
+static void mtk_ppe_cache_clear(struct mtk_ppe *ppe)
+{
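+ /* pulse the clear bit to flush the PPE lookup cache */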
+ ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
+ ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR);
+}
+
+static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable)
+{
+ mtk_ppe_cache_clear(ppe);
+
+ ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN,
+ enable * MTK_PPE_CACHE_CTL_EN);
+}
+
+static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e)
+{
+ u32 hv1, hv2, hv3;
+ u32 hash;
+
+ switch (mtk_get_ib1_pkt_type(eth, e->ib1)) {
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ hv1 = e->ipv4.orig.ports;
+ hv2 = e->ipv4.orig.dest_ip;
+ hv3 = e->ipv4.orig.src_ip;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3];
+ hv1 ^= e->ipv6.ports;
+
+ hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2];
+ hv2 ^= e->ipv6.dest_ip[0];
+
+ hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1];
+ hv3 ^= e->ipv6.src_ip[0];
+ break;
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ default:
+ WARN_ON_ONCE(1);
+ return MTK_PPE_HASH_MASK;
+ }
+
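+ /* mix the three hash words and scale the result by the SoC hash offset */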
+ hash = (hv1 & hv2) | ((~hv1) & hv3);
+ hash = (hash >> 24) | ((hash & 0xffffff) << 8);
+ hash ^= hv1 ^ hv2 ^ hv3;
+ hash ^= hash >> 16;
+ hash <<= (ffs(eth->soc->hash_offset) - 1);
+ hash &= MTK_PPE_ENTRIES - 1;
+
+ return hash;
+}
+
+static inline struct mtk_foe_mac_info *
+mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+{
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.l2;
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ return &entry->ipv6.l2;
+
+ return &entry->ipv4.l2;
+}
+
+static inline u32 *
+mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry)
+{
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return &entry->bridge.ib2;
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ return &entry->ipv6.ib2;
+
+ return &entry->ipv4.ib2;
+}
+
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac)
+{
+ struct mtk_foe_mac_info *l2;
+ u32 ports_pad, val;
+
+ memset(entry, 0, sizeof(*entry));
+
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf);
+ } else {
+ int port_mg = eth->soc->offload_version > 1 ? 0 : 0x3f;
+
+ val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) |
+ FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) |
+ FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) |
+ MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL;
+ entry->ib1 = val;
+
+ val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_MG, port_mg) |
+ FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f);
+ }
+
+ if (is_multicast_ether_addr(dest_mac))
+ val |= mtk_get_ib2_multicast_mask(eth);
+
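+ /* 3-tuple entries carry no L4 ports: pad the ports field with the
+ * 0xa5a5a5 pattern and the L4 protocol number
+ */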
+ ports_pad = 0xa5a5a500 | (l4proto & 0xff);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
+ entry->ipv4.orig.ports = ports_pad;
+ if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ entry->ipv6.ports = ports_pad;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE) {
+ ether_addr_copy(entry->bridge.src_mac, src_mac);
+ ether_addr_copy(entry->bridge.dest_mac, dest_mac);
+ entry->bridge.ib2 = val;
+ l2 = &entry->bridge.l2;
+ } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ entry->ipv6.ib2 = val;
+ l2 = &entry->ipv6.l2;
+ } else {
+ entry->ipv4.ib2 = val;
+ l2 = &entry->ipv4.l2;
+ }
+
+ l2->dest_mac_hi = get_unaligned_be32(dest_mac);
+ l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4);
+ l2->src_mac_hi = get_unaligned_be32(src_mac);
+ l2->src_mac_lo = get_unaligned_be16(src_mac + 4);
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T)
+ l2->etype = ETH_P_IPV6;
+ else
+ l2->etype = ETH_P_IP;
+
+ return 0;
+}
+
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+ u32 val = *ib2;
+
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ val &= ~MTK_FOE_IB2_DEST_PORT_V2;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port);
+ } else {
+ val &= ~MTK_FOE_IB2_DEST_PORT;
+ val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port);
+ }
+ *ib2 = val;
+
+ return 0;
+}
+
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool egress,
+ __be32 src_addr, __be16 src_port,
+ __be32 dest_addr, __be16 dest_port)
+{
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+ struct mtk_ipv4_tuple *t;
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ if (egress) {
+ t = &entry->ipv4.new;
+ break;
+ }
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ t = &entry->ipv4.orig;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr);
+ entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr);
+ return 0;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ t->src_ip = be32_to_cpu(src_addr);
+ t->dest_ip = be32_to_cpu(dest_addr);
+
+ if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE)
+ return 0;
+
+ t->src_port = be16_to_cpu(src_port);
+ t->dest_port = be16_to_cpu(dest_port);
+
+ return 0;
+}
+
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
+ __be32 *src_addr, __be16 src_port,
+ __be32 *dest_addr, __be16 dest_port)
+{
+ int type = mtk_get_ib1_pkt_type(eth, entry->ib1);
+ u32 *src, *dest;
+ int i;
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ src = entry->dslite.tunnel_src_ip;
+ dest = entry->dslite.tunnel_dest_ip;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ entry->ipv6.src_port = be16_to_cpu(src_port);
+ entry->ipv6.dest_port = be16_to_cpu(dest_port);
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ src = entry->ipv6.src_ip;
+ dest = entry->ipv6.dest_ip;
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < 4; i++)
+ src[i] = be32_to_cpu(src_addr[i]);
+ for (i = 0; i < 4; i++)
+ dest[i] = be32_to_cpu(dest_addr[i]);
+
+ return 0;
+}
+
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+
+ l2->etype = BIT(port);
+
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)))
+ entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1);
+ else
+ l2->etype |= BIT(8);
+
+ entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth);
+
+ return 0;
+}
+
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+
+ switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) {
+ case 0:
+ entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) |
+ mtk_prep_ib1_vlan_layer(eth, 1);
+ l2->vlan1 = vid;
+ return 0;
+ case 1:
+ if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) {
+ l2->vlan1 = vid;
+ l2->etype |= BIT(8);
+ } else {
+ l2->vlan2 = vid;
+ entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1);
+ }
+ return 0;
+ default:
+ return -ENOSPC;
+ }
+}
+
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+
+ if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) ||
+ (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth)))
+ l2->etype = ETH_P_PPP_SES;
+
+ entry->ib1 |= mtk_get_ib1_ppoe_mask(eth);
+ l2->pppoe_id = sid;
+
+ return 0;
+}
+
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid)
+{
+ struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry);
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ switch (eth->soc->version) {
+ case 3:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->w3info = FIELD_PREP(MTK_FOE_WINFO_WCID_V3, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS_V3, bss);
+ break;
+ case 2:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) |
+ MTK_FOE_IB2_WDMA_WINFO_V2;
+ l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_WINFO_BSS, bss);
+ break;
+ default:
+ *ib2 &= ~MTK_FOE_IB2_PORT_MG;
+ *ib2 |= MTK_FOE_IB2_WDMA_WINFO;
+ if (wdma_idx)
+ *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX;
+ l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) |
+ FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq);
+ break;
+ }
+
+ return 0;
+}
+
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue)
+{
+ u32 *ib2 = mtk_foe_entry_ib2(eth, entry);
+
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ *ib2 &= ~MTK_FOE_IB2_QID_V2;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID_V2, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS_V2;
+ } else {
+ *ib2 &= ~MTK_FOE_IB2_QID;
+ *ib2 |= FIELD_PREP(MTK_FOE_IB2_QID, queue);
+ *ib2 |= MTK_FOE_IB2_PSE_QOS;
+ }
+
+ return 0;
+}
+
+static bool
+mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry,
+ struct mtk_foe_entry *data)
+{
+ int type, len;
+
+ if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP)
+ return false;
+
+ type = mtk_get_ib1_pkt_type(eth, entry->data.ib1);
+ if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE)
+ len = offsetof(struct mtk_foe_entry, ipv6._rsv);
+ else
+ len = offsetof(struct mtk_foe_entry, ipv4.ib2);
+
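+ /* compare everything after ib1; 'len' is an offset into the full entry,
+ * so subtract the leading ib1 word
+ */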
+ return !memcmp(&entry->data.data, &data->data, len - 4);
+}
+
+static void
+__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct hlist_head *head;
+ struct hlist_node *tmp;
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+
+ head = &entry->l2_flows;
+ hlist_for_each_entry_safe(entry, tmp, head, l2_data.list)
+ __mtk_foe_entry_clear(ppe, entry);
+ return;
+ }
+
+ hlist_del_init(&entry->list);
+ if (entry->hash != 0xffff) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash);
+
+ hwe->ib1 &= ~MTK_FOE_IB1_STATE;
+ hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID);
+ dma_wmb();
+ mtk_ppe_cache_clear(ppe);
+
+ if (ppe->accounting) {
+ struct mtk_foe_accounting *acct;
+
+ acct = ppe->acct_table + entry->hash * sizeof(*acct);
+ acct->packets = 0;
+ acct->bytes = 0;
+ }
+ }
+ entry->hash = 0xffff;
+
+ if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW)
+ return;
+
+ hlist_del_init(&entry->l2_data.list);
+ kfree(entry);
+}
+
+static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1)
+{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ u16 now = mtk_eth_timestamp(ppe->eth);
+ u16 timestamp = ib1 & ib1_ts_mask;
+
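+ /* the bind timestamp wraps; handle 'now' having already wrapped past it */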
+ if (timestamp > now)
+ return ib1_ts_mask + 1 - timestamp + now;
+ else
+ return now - timestamp;
+}
+
+static void
+mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth);
+ struct mtk_flow_entry *cur;
+ struct mtk_foe_entry *hwe;
+ struct hlist_node *tmp;
+ int idle;
+
+ idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+ hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) {
+ int cur_idle;
+ u32 ib1;
+
+ hwe = mtk_foe_get_entry(ppe, cur->hash);
+ ib1 = READ_ONCE(hwe->ib1);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) {
+ cur->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, cur);
+ continue;
+ }
+
+ cur_idle = __mtk_foe_entry_idle_time(ppe, ib1);
+ if (cur_idle >= idle)
+ continue;
+
+ idle = cur_idle;
+ entry->data.ib1 &= ~ib1_ts_mask;
+ entry->data.ib1 |= hwe->ib1 & ib1_ts_mask;
+ }
+}
+
+static void
+mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_foe_entry foe = {};
+ struct mtk_foe_entry *hwe;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (entry->type == MTK_FLOW_TYPE_L2) {
+ mtk_flow_entry_update_l2(ppe, entry);
+ goto out;
+ }
+
+ if (entry->hash == 0xffff)
+ goto out;
+
+ hwe = mtk_foe_get_entry(ppe, entry->hash);
+ memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size);
+ if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) {
+ entry->hash = 0xffff;
+ goto out;
+ }
+
+ entry->data.ib1 = foe.ib1;
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+static void
+__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry,
+ u16 hash)
+{
+ struct mtk_eth *eth = ppe->eth;
+ u16 timestamp = mtk_eth_timestamp(eth);
+ struct mtk_foe_entry *hwe;
+ u32 val;
+
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2,
+ timestamp);
+ } else {
+ entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP;
+ entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP,
+ timestamp);
+ }
+
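+ /* write the entry body first and only publish ib1 after the barrier, so
+ * the hardware never sees a partially updated entry
+ */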
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1));
+ wmb();
+ hwe->ib1 = entry->ib1;
+
+ if (ppe->accounting) {
+ if (mtk_is_netsys_v2_or_greater(eth))
+ val = MTK_FOE_IB2_MIB_CNT_V2;
+ else
+ val = MTK_FOE_IB2_MIB_CNT;
+ *mtk_foe_entry_ib2(eth, hwe) |= val;
+ }
+
+ dma_wmb();
+
+ mtk_ppe_cache_clear(ppe);
+}
+
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ spin_lock_bh(&ppe_lock);
+ __mtk_foe_entry_clear(ppe, entry);
+ spin_unlock_bh(&ppe_lock);
+}
+
+static int
+mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ struct mtk_flow_entry *prev;
+
+ entry->type = MTK_FLOW_TYPE_L2;
+
+ prev = rhashtable_lookup_get_insert_fast(&ppe->l2_flows, &entry->l2_node,
+ mtk_flow_l2_ht_params);
+ if (likely(!prev))
+ return 0;
+
+ if (IS_ERR(prev))
+ return PTR_ERR(prev);
+
+ return rhashtable_replace_fast(&ppe->l2_flows, &prev->l2_node,
+ &entry->l2_node, mtk_flow_l2_ht_params);
+}
+
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1);
+ u32 hash;
+
+ if (type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return mtk_foe_entry_commit_l2(ppe, entry);
+
+ hash = mtk_ppe_hash_entry(ppe->eth, &entry->data);
+ entry->hash = 0xffff;
+ spin_lock_bh(&ppe_lock);
+ hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]);
+ spin_unlock_bh(&ppe_lock);
+
+ return 0;
+}
+
+static void
+mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry,
+ u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct mtk_flow_entry *flow_info;
+ struct mtk_foe_entry foe = {}, *hwe;
+ struct mtk_foe_mac_info *l2;
+ u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP;
+ int type;
+
+ flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC);
+ if (!flow_info)
+ return;
+
+ flow_info->l2_data.base_flow = entry;
+ flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW;
+ flow_info->hash = hash;
+ hlist_add_head(&flow_info->list,
+ &ppe->foe_flow[hash / soc->hash_offset]);
+ hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows);
+
+ hwe = mtk_foe_get_entry(ppe, hash);
+ memcpy(&foe, hwe, soc->foe_entry_size);
+ foe.ib1 &= ib1_mask;
+ foe.ib1 |= entry->data.ib1 & ~ib1_mask;
+
+ l2 = mtk_foe_entry_l2(ppe->eth, &foe);
+ memcpy(l2, &entry->data.bridge.l2, sizeof(*l2));
+
+ type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1);
+ if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT)
+ memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new));
+ else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP)
+ l2->etype = ETH_P_IPV6;
+
+ *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2;
+
+ __mtk_foe_entry_commit(ppe, &foe, hash);
+}
+
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ const struct mtk_soc_data *soc = ppe->eth->soc;
+ struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset];
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash);
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_bridge key = {};
+ struct hlist_node *n;
+ struct ethhdr *eh;
+ bool found = false;
+ u8 *tag;
+
+ spin_lock_bh(&ppe_lock);
+
+ if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND)
+ goto out;
+
+ hlist_for_each_entry_safe(entry, n, head, list) {
+ if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) {
+ if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) ==
+ MTK_FOE_STATE_BIND))
+ continue;
+
+ entry->hash = 0xffff;
+ __mtk_foe_entry_clear(ppe, entry);
+ continue;
+ }
+
+ if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) {
+ if (entry->hash != 0xffff)
+ entry->hash = 0xffff;
+ continue;
+ }
+
+ entry->hash = hash;
+ __mtk_foe_entry_commit(ppe, &entry->data, hash);
+ found = true;
+ }
+
+ if (found)
+ goto out;
+
+ eh = eth_hdr(skb);
+ ether_addr_copy(key.dest_mac, eh->h_dest);
+ ether_addr_copy(key.src_mac, eh->h_source);
+ tag = skb->data - 2;
+ key.vlan = 0;
+ switch (skb->protocol) {
+#if IS_ENABLED(CONFIG_NET_DSA)
+ case htons(ETH_P_XDSA):
+ if (!netdev_uses_dsa(skb->dev) ||
+ skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ goto out;
+
+ if (!skb_metadata_dst(skb))
+ tag += 4;
+
+ if (get_unaligned_be16(tag) != ETH_P_8021Q)
+ break;
+
+ fallthrough;
+#endif
+ case htons(ETH_P_8021Q):
+ key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK;
+ break;
+ default:
+ break;
+ }
+
+ entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params);
+ if (!entry)
+ goto out;
+
+ mtk_foe_entry_commit_subflow(ppe, entry, hash);
+
+out:
+ spin_unlock_bh(&ppe_lock);
+}
+
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
+{
+ mtk_flow_entry_update(ppe, entry);
+
+ return __mtk_foe_entry_idle_time(ppe, entry->data.ib1);
+}
+
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe)
+{
+ if (!ppe)
+ return -EINVAL;
+
+ /* disable KA */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ ppe_clear(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0);
+ usleep_range(10000, 11000);
+
+ /* set KA timer to maximum */
+ ppe_set(ppe, MTK_PPE_BIND_LMT1, MTK_PPE_NTU_KEEPALIVE);
+ ppe_w32(ppe, MTK_PPE_KEEPALIVE, 0xffffffff);
+
+ /* set KA tick select */
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_TICK_SEL);
+ ppe_set(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_KEEPALIVE);
+ usleep_range(10000, 11000);
+
+ /* disable scan mode */
+ ppe_clear(ppe, MTK_PPE_TB_CFG, MTK_PPE_TB_CFG_SCAN_MODE);
+ usleep_range(10000, 11000);
+
+ return mtk_ppe_wait_busy(ppe);
+}
+
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+ struct mtk_foe_accounting *diff)
+{
+ struct mtk_foe_accounting *acct;
+ int size = sizeof(struct mtk_foe_accounting);
+ u64 bytes, packets;
+
+ if (!ppe->accounting)
+ return NULL;
+
+ if (mtk_mib_entry_read(ppe, index, &bytes, &packets))
+ return NULL;
+
+ acct = ppe->acct_table + index * size;
+
+ acct->bytes += bytes;
+ acct->packets += packets;
+
+ if (diff) {
+ diff->bytes = bytes;
+ diff->packets = packets;
+ }
+
+ return acct;
+}
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index)
+{
+ bool accounting = eth->soc->has_accounting;
+ const struct mtk_soc_data *soc = eth->soc;
+ struct mtk_foe_accounting *acct;
+ struct device *dev = eth->dev;
+ struct mtk_mib_entry *mib;
+ struct mtk_ppe *ppe;
+ u32 foe_flow_size;
+ void *foe;
+
+ ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL);
+ if (!ppe)
+ return NULL;
+
+ rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params);
+
+ /* need to allocate a separate device, since the PPE DMA access is
+ * not coherent.
+ */
+ ppe->base = base;
+ ppe->eth = eth;
+ ppe->dev = dev;
+ ppe->version = eth->soc->offload_version;
+ ppe->accounting = accounting;
+
+ foe = dmam_alloc_coherent(ppe->dev,
+ MTK_PPE_ENTRIES * soc->foe_entry_size,
+ &ppe->foe_phys, GFP_KERNEL);
+ if (!foe)
+ goto err_free_l2_flows;
+
+ ppe->foe_table = foe;
+
+ foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) *
+ sizeof(*ppe->foe_flow);
+ ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL);
+ if (!ppe->foe_flow)
+ goto err_free_l2_flows;
+
+ if (accounting) {
+ mib = dmam_alloc_coherent(ppe->dev, MTK_PPE_ENTRIES * sizeof(*mib),
+ &ppe->mib_phys, GFP_KERNEL);
+ if (!mib)
+ return NULL;
+
+ ppe->mib_table = mib;
+
+ acct = devm_kzalloc(dev, MTK_PPE_ENTRIES * sizeof(*acct),
+ GFP_KERNEL);
+
+ if (!acct)
+ return NULL;
+
+ ppe->acct_table = acct;
+ }
+
+ mtk_ppe_debugfs_init(ppe, index);
+
+ return ppe;
+
+err_free_l2_flows:
+ rhashtable_destroy(&ppe->l2_flows);
+ return NULL;
+}
+
+void mtk_ppe_deinit(struct mtk_eth *eth)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) {
+ if (!eth->ppe[i])
+ return;
+ rhashtable_destroy(&eth->ppe[i]->l2_flows);
+ }
+}
+
+static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
+{
+ static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 };
+ int i, k;
+
+ memset(ppe->foe_table, 0,
+ MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size);
+
+ if (!IS_ENABLED(CONFIG_SOC_MT7621))
+ return;
+
+ /* skip all entries that cross the 1024 byte boundary */
+ for (i = 0; i < MTK_PPE_ENTRIES; i += 128) {
+ for (k = 0; k < ARRAY_SIZE(skip); k++) {
+ struct mtk_foe_entry *hwe;
+
+ hwe = mtk_foe_get_entry(ppe, i + skip[k]);
+ hwe->ib1 |= MTK_FOE_IB1_STATIC;
+ }
+ }
+}
+
+void mtk_ppe_start(struct mtk_ppe *ppe)
+{
+ u32 val;
+
+ if (!ppe)
+ return;
+
+ mtk_ppe_init_foe_table(ppe);
+ ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
+
+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
+ MTK_PPE_TB_CFG_AGE_UNBIND |
+ MTK_PPE_TB_CFG_AGE_TCP |
+ MTK_PPE_TB_CFG_AGE_UDP |
+ MTK_PPE_TB_CFG_AGE_TCP_FIN |
+ FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS,
+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) |
+ FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE,
+ MTK_PPE_KEEPALIVE_DISABLE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) |
+ FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE,
+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) |
+ FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM,
+ MTK_PPE_ENTRIES_SHIFT);
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
+ val |= MTK_PPE_TB_CFG_INFO_SEL;
+ if (!mtk_is_netsys_v3_or_greater(ppe->eth))
+ val |= MTK_PPE_TB_CFG_ENTRY_80B;
+ ppe_w32(ppe, MTK_PPE_TB_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK,
+ MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6);
+
+ mtk_ppe_cache_enable(ppe, true);
+
+ val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE |
+ MTK_PPE_FLOW_CFG_IP6_5T_ROUTE |
+ MTK_PPE_FLOW_CFG_IP6_6RD |
+ MTK_PPE_FLOW_CFG_IP4_NAT |
+ MTK_PPE_FLOW_CFG_IP4_NAPT |
+ MTK_PPE_FLOW_CFG_IP4_DSLITE |
+ MTK_PPE_FLOW_CFG_IP4_NAT_FRAG;
+ if (mtk_is_netsys_v2_or_greater(ppe->eth))
+ val |= MTK_PPE_MD_TOAP_BYP_CRSN0 |
+ MTK_PPE_MD_TOAP_BYP_CRSN1 |
+ MTK_PPE_MD_TOAP_BYP_CRSN2 |
+ MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY;
+ else
+ val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG |
+ MTK_PPE_FLOW_CFG_IP4_UDP_FRAG;
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, val);
+
+ val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) |
+ FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3);
+ ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val);
+
+ val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) |
+ FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1);
+ ppe_w32(ppe, MTK_PPE_BIND_AGE0, val);
+
+ val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) |
+ FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7);
+ ppe_w32(ppe, MTK_PPE_BIND_AGE1, val);
+
+ val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF;
+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val);
+
+ val = MTK_PPE_BIND_LIMIT1_FULL |
+ FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1);
+ ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val);
+
+ val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) |
+ FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1);
+ ppe_w32(ppe, MTK_PPE_BIND_RATE, val);
+
+ /* enable PPE */
+ val = MTK_PPE_GLO_CFG_EN |
+ MTK_PPE_GLO_CFG_IP4_L4_CS_DROP |
+ MTK_PPE_GLO_CFG_IP4_CS_DROP |
+ MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE;
+ ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
+
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
+
+ if (mtk_is_netsys_v2_or_greater(ppe->eth)) {
+ ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777);
+ ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f);
+ }
+
+ if (ppe->accounting && ppe->mib_phys) {
+ ppe_w32(ppe, MTK_PPE_MIB_TB_BASE, ppe->mib_phys);
+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_EN,
+ MTK_PPE_MIB_CFG_EN);
+ ppe_m32(ppe, MTK_PPE_MIB_CFG, MTK_PPE_MIB_CFG_RD_CLR,
+ MTK_PPE_MIB_CFG_RD_CLR);
+ ppe_m32(ppe, MTK_PPE_MIB_CACHE_CTL, MTK_PPE_MIB_CACHE_CTL_EN,
+ MTK_PPE_MIB_CFG_RD_CLR);
+ }
+}
+
+int mtk_ppe_stop(struct mtk_ppe *ppe)
+{
+ u32 val;
+ int i;
+
+ if (!ppe)
+ return 0;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i);
+
+ hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
+ MTK_FOE_STATE_INVALID);
+ }
+
+ mtk_ppe_cache_enable(ppe, false);
+
+ /* disable offload engine */
+ ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN);
+ ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0);
+
+ /* disable aging */
+ val = MTK_PPE_TB_CFG_AGE_NON_L4 |
+ MTK_PPE_TB_CFG_AGE_UNBIND |
+ MTK_PPE_TB_CFG_AGE_TCP |
+ MTK_PPE_TB_CFG_AGE_UDP |
+ MTK_PPE_TB_CFG_AGE_TCP_FIN;
+ ppe_clear(ppe, MTK_PPE_TB_CFG, val);
+
+ return mtk_ppe_wait_busy(ppe);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h
new file mode 100644
index 0000000000..e3d0ec72bc
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -0,0 +1,405 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_PPE_H
+#define __MTK_PPE_H
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <linux/rhashtable.h>
+
+#define MTK_PPE_ENTRIES_SHIFT 3
+#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT)
+#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1)
+#define MTK_PPE_WAIT_TIMEOUT_US 1000000
+
+#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0)
+#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8)
+#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24)
+
+#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0)
+#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16)
+#define MTK_FOE_IB1_BIND_PPPOE BIT(19)
+#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20)
+#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21)
+#define MTK_FOE_IB1_BIND_CACHE BIT(22)
+#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23)
+#define MTK_FOE_IB1_BIND_TTL BIT(24)
+
+#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25)
+#define MTK_FOE_IB1_STATE GENMASK(29, 28)
+#define MTK_FOE_IB1_UDP BIT(30)
+#define MTK_FOE_IB1_STATIC BIT(31)
+
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0)
+#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14)
+#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17)
+#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18)
+#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20)
+#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22)
+#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23)
+
+enum {
+ MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0,
+ MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1,
+ MTK_PPE_PKT_TYPE_BRIDGE = 2,
+ MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3,
+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4,
+ MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5,
+ MTK_PPE_PKT_TYPE_IPV6_6RD = 7,
+};
+
+#define MTK_FOE_IB2_QID GENMASK(3, 0)
+#define MTK_FOE_IB2_PSE_QOS BIT(4)
+#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5)
+#define MTK_FOE_IB2_MULTICAST BIT(8)
+#define MTK_FOE_IB2_MIB_CNT BIT(10)
+
+#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12)
+#define MTK_FOE_IB2_MIB_CNT_V2 BIT(15)
+#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16)
+#define MTK_FOE_IB2_WDMA_WINFO BIT(17)
+
+#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12)
+
+#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17)
+#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18)
+
+#define MTK_FOE_IB2_DSCP GENMASK(31, 24)
+
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_FOE_IB2_QID_V2 GENMASK(6, 0)
+#define MTK_FOE_IB2_PORT_MG_V2 BIT(7)
+#define MTK_FOE_IB2_PSE_QOS_V2 BIT(8)
+#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9)
+#define MTK_FOE_IB2_MULTICAST_V2 BIT(13)
+#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19)
+#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20)
+
+#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6)
+#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14)
+
+#define MTK_FOE_WINFO_BSS GENMASK(5, 0)
+#define MTK_FOE_WINFO_WCID GENMASK(15, 6)
+
+#define MTK_FOE_WINFO_BSS_V3 GENMASK(23, 16)
+#define MTK_FOE_WINFO_WCID_V3 GENMASK(15, 0)
+
+#define MTK_FOE_WINFO_PAO_USR_INFO GENMASK(15, 0)
+#define MTK_FOE_WINFO_PAO_TID GENMASK(19, 16)
+#define MTK_FOE_WINFO_PAO_IS_FIXEDRATE BIT(20)
+#define MTK_FOE_WINFO_PAO_IS_PRIOR BIT(21)
+#define MTK_FOE_WINFO_PAO_IS_SP BIT(22)
+#define MTK_FOE_WINFO_PAO_HF BIT(23)
+#define MTK_FOE_WINFO_PAO_AMSDU_EN BIT(24)
+
+enum {
+ MTK_FOE_STATE_INVALID,
+ MTK_FOE_STATE_UNBIND,
+ MTK_FOE_STATE_BIND,
+ MTK_FOE_STATE_FIN
+};
+
+struct mtk_foe_mac_info {
+ u16 vlan1;
+ u16 etype;
+
+ u32 dest_mac_hi;
+
+ u16 vlan2;
+ u16 dest_mac_lo;
+
+ u32 src_mac_hi;
+
+ u16 pppoe_id;
+ u16 src_mac_lo;
+
+ /* netsys_v2 */
+ u16 minfo;
+ u16 winfo;
+
+ /* netsys_v3 */
+ u32 w3info;
+ u32 wpao;
+};
+
+/* software-only entry type */
+struct mtk_foe_bridge {
+ u8 dest_mac[ETH_ALEN];
+ u8 src_mac[ETH_ALEN];
+ u16 vlan;
+
+ struct {} key_end;
+
+ u32 ib2;
+
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_ipv4_tuple {
+ u32 src_ip;
+ u32 dest_ip;
+ union {
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ };
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ };
+ u32 ports;
+ };
+};
+
+struct mtk_foe_ipv4 {
+ struct mtk_ipv4_tuple orig;
+
+ u32 ib2;
+
+ struct mtk_ipv4_tuple new;
+
+ u16 timestamp;
+ u16 _rsv0[3];
+
+ u32 udf_tsid;
+
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_ipv4_dslite {
+ struct mtk_ipv4_tuple ip4;
+
+ u32 tunnel_src_ip[4];
+ u32 tunnel_dest_ip[4];
+
+ u8 flow_label[3];
+ u8 priority;
+
+ u32 udf_tsid;
+
+ u32 ib2;
+
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_ipv6 {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+
+ union {
+ struct {
+ u8 protocol;
+ u8 _pad[3]; /* fill with 0xa5a5a5 */
+ }; /* 3-tuple */
+ struct {
+ u16 dest_port;
+ u16 src_port;
+ }; /* 5-tuple */
+ u32 ports;
+ };
+
+ u32 _rsv[3];
+
+ u32 udf;
+
+ u32 ib2;
+ struct mtk_foe_mac_info l2;
+};
+
+struct mtk_foe_ipv6_6rd {
+ u32 src_ip[4];
+ u32 dest_ip[4];
+ u16 dest_port;
+ u16 src_port;
+
+ u32 tunnel_src_ip;
+ u32 tunnel_dest_ip;
+
+ u16 hdr_csum;
+ u8 dscp;
+ u8 ttl;
+
+ u8 flag;
+ u8 pad;
+ u8 per_flow_6rd_id;
+ u8 pad2;
+
+ u32 ib2;
+ struct mtk_foe_mac_info l2;
+};
+
+#define MTK_FOE_ENTRY_V1_SIZE 80
+#define MTK_FOE_ENTRY_V2_SIZE 96
+#define MTK_FOE_ENTRY_V3_SIZE 128
+
+struct mtk_foe_entry {
+ u32 ib1;
+
+ union {
+ struct mtk_foe_bridge bridge;
+ struct mtk_foe_ipv4 ipv4;
+ struct mtk_foe_ipv4_dslite dslite;
+ struct mtk_foe_ipv6 ipv6;
+ struct mtk_foe_ipv6_6rd ipv6_6rd;
+ u32 data[31];
+ };
+};
+
+enum {
+ MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02,
+ MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03,
+ MTK_PPE_CPU_REASON_NO_FLOW = 0x07,
+ MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08,
+ MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09,
+ MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a,
+ MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b,
+ MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c,
+ MTK_PPE_CPU_REASON_UN_HIT = 0x0d,
+ MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e,
+ MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f,
+ MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10,
+ MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11,
+ MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12,
+ MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13,
+ MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14,
+ MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15,
+ MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16,
+ MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17,
+ MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18,
+ MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19,
+ MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a,
+ MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b,
+ MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c,
+ MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e,
+ MTK_PPE_CPU_REASON_INVALID = 0x1f,
+};
+
+enum {
+ MTK_FLOW_TYPE_L4,
+ MTK_FLOW_TYPE_L2,
+ MTK_FLOW_TYPE_L2_SUBFLOW,
+};
+
+struct mtk_flow_entry {
+ union {
+ struct hlist_node list;
+ struct {
+ struct rhash_head l2_node;
+ struct hlist_head l2_flows;
+ };
+ };
+ u8 type;
+ s8 wed_index;
+ u8 ppe_index;
+ u16 hash;
+ union {
+ struct mtk_foe_entry data;
+ struct {
+ struct mtk_flow_entry *base_flow;
+ struct hlist_node list;
+ } l2_data;
+ };
+ struct rhash_head node;
+ unsigned long cookie;
+};
+
+struct mtk_mib_entry {
+ u32 byt_cnt_l;
+ u16 byt_cnt_h;
+ u32 pkt_cnt_l;
+ u8 pkt_cnt_h;
+ u8 _rsv0;
+ u32 _rsv1;
+} __packed;
+
+struct mtk_foe_accounting {
+ u64 bytes;
+ u64 packets;
+};
+
+struct mtk_ppe {
+ struct mtk_eth *eth;
+ struct device *dev;
+ void __iomem *base;
+ int version;
+ char dirname[5];
+ bool accounting;
+
+ void *foe_table;
+ dma_addr_t foe_phys;
+
+ struct mtk_mib_entry *mib_table;
+ dma_addr_t mib_phys;
+
+ u16 foe_check_time[MTK_PPE_ENTRIES];
+ struct hlist_head *foe_flow;
+
+ struct rhashtable l2_flows;
+
+ void *acct_table;
+};
+
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int index);
+
+void mtk_ppe_deinit(struct mtk_eth *eth);
+void mtk_ppe_start(struct mtk_ppe *ppe);
+int mtk_ppe_stop(struct mtk_ppe *ppe);
+int mtk_ppe_prepare_reset(struct mtk_ppe *ppe);
+
+void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
+
+static inline void
+mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash)
+{
+ u16 now, diff;
+
+ if (!ppe)
+ return;
+
+ if (hash > MTK_PPE_HASH_MASK)
+ return;
+
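+ /* re-check a given hash at most once every HZ / 10 jiffies */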
+ now = (u16)jiffies;
+ diff = now - ppe->foe_check_time[hash];
+ if (diff < HZ / 10)
+ return;
+
+ ppe->foe_check_time[hash] = now;
+ __mtk_ppe_check_skb(ppe, skb, hash);
+}
+
+int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int type, int l4proto, u8 pse_port, u8 *src_mac,
+ u8 *dest_mac);
+int mtk_foe_entry_set_pse_port(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, u8 port);
+int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry, bool orig,
+ __be32 src_addr, __be16 src_port,
+ __be32 dest_addr, __be16 dest_port);
+int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth,
+ struct mtk_foe_entry *entry,
+ __be32 *src_addr, __be16 src_port,
+ __be32 *dest_addr, __be16 dest_port);
+int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int port);
+int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int vid);
+int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int sid);
+int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ int wdma_idx, int txq, int bss, int wcid);
+int mtk_foe_entry_set_queue(struct mtk_eth *eth, struct mtk_foe_entry *entry,
+ unsigned int queue);
+int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
+struct mtk_foe_accounting *mtk_foe_entry_get_mib(struct mtk_ppe *ppe, u32 index,
+ struct mtk_foe_accounting *diff);
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
new file mode 100644
index 0000000000..1a97feca77
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include "mtk_eth_soc.h"
+
+struct mtk_flow_addr_info {
+ void *src, *dest;
+ u16 *src_port, *dest_port;
+ bool ipv6;
+};
+
+static const char *mtk_foe_entry_state_str(int state)
+{
+ static const char * const state_str[] = {
+ [MTK_FOE_STATE_INVALID] = "INV",
+ [MTK_FOE_STATE_UNBIND] = "UNB",
+ [MTK_FOE_STATE_BIND] = "BND",
+ [MTK_FOE_STATE_FIN] = "FIN",
+ };
+
+ if (state >= ARRAY_SIZE(state_str) || !state_str[state])
+ return "UNK";
+
+ return state_str[state];
+}
+
+static const char *mtk_foe_pkt_type_str(int type)
+{
+ static const char * const type_str[] = {
+ [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T",
+ [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T",
+ [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE",
+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T",
+ [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T",
+ [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD",
+ };
+
+ if (type >= ARRAY_SIZE(type_str) || !type_str[type])
+ return "UNKNOWN";
+
+ return type_str[type];
+}
+
+static void
+mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6)
+{
+ __be32 n_addr[4];
+ int i;
+
+ if (!ipv6) {
+ seq_printf(m, "%pI4h", addr);
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(n_addr); i++)
+ n_addr[i] = htonl(addr[i]);
+ seq_printf(m, "%pI6", n_addr);
+}
+
+static void
+mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai)
+{
+ mtk_print_addr(m, ai->src, ai->ipv6);
+ if (ai->src_port)
+ seq_printf(m, ":%d", *ai->src_port);
+ seq_printf(m, "->");
+ mtk_print_addr(m, ai->dest, ai->ipv6);
+ if (ai->dest_port)
+ seq_printf(m, ":%d", *ai->dest_port);
+}
+
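+/* Dump one line per FOE entry: index, state, packet type, the original (and,
+ * for IPv4 entry types, the translated) address tuple, MAC addresses, VLAN
+ * tags, the raw ib1/ib2 words and packet/byte counters (zero when hardware
+ * accounting is disabled).
+ */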
+static int
+mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind)
+{
+ struct mtk_ppe *ppe = m->private;
+ int i;
+
+ for (i = 0; i < MTK_PPE_ENTRIES; i++) {
+ struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i);
+ struct mtk_foe_mac_info *l2;
+ struct mtk_flow_addr_info ai = {};
+ struct mtk_foe_accounting *acct;
+ unsigned char h_source[ETH_ALEN];
+ unsigned char h_dest[ETH_ALEN];
+ int type, state;
+ u32 ib2;
+
+ state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1);
+ if (!state)
+ continue;
+
+ if (bind && state != MTK_FOE_STATE_BIND)
+ continue;
+
+ acct = mtk_foe_entry_get_mib(ppe, i, NULL);
+
+ type = mtk_get_ib1_pkt_type(ppe->eth, entry->ib1);
+ seq_printf(m, "%05x %s %7s", i,
+ mtk_foe_entry_state_str(state),
+ mtk_foe_pkt_type_str(type));
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ ai.src_port = &entry->ipv4.orig.src_port;
+ ai.dest_port = &entry->ipv4.orig.dest_port;
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ ai.src = &entry->ipv4.orig.src_ip;
+ ai.dest = &entry->ipv4.orig.dest_ip;
+ break;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T:
+ ai.src_port = &entry->ipv6.src_port;
+ ai.dest_port = &entry->ipv6.dest_port;
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T:
+ case MTK_PPE_PKT_TYPE_IPV6_6RD:
+ ai.src = &entry->ipv6.src_ip;
+ ai.dest = &entry->ipv6.dest_ip;
+ ai.ipv6 = true;
+ break;
+ }
+
+ seq_printf(m, " orig=");
+ mtk_print_addr_info(m, &ai);
+
+ switch (type) {
+ case MTK_PPE_PKT_TYPE_IPV4_HNAPT:
+ case MTK_PPE_PKT_TYPE_IPV4_DSLITE:
+ ai.src_port = &entry->ipv4.new.src_port;
+ ai.dest_port = &entry->ipv4.new.dest_port;
+ fallthrough;
+ case MTK_PPE_PKT_TYPE_IPV4_ROUTE:
+ ai.src = &entry->ipv4.new.src_ip;
+ ai.dest = &entry->ipv4.new.dest_ip;
+ seq_printf(m, " new=");
+ mtk_print_addr_info(m, &ai);
+ break;
+ }
+
+ if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) {
+ l2 = &entry->ipv6.l2;
+ ib2 = entry->ipv6.ib2;
+ } else {
+ l2 = &entry->ipv4.l2;
+ ib2 = entry->ipv4.ib2;
+ }
+
+ *((__be32 *)h_source) = htonl(l2->src_mac_hi);
+ *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo);
+ *((__be32 *)h_dest) = htonl(l2->dest_mac_hi);
+ *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo);
+
+ seq_printf(m, " eth=%pM->%pM etype=%04x"
+ " vlan=%d,%d ib1=%08x ib2=%08x"
+ " packets=%llu bytes=%llu\n",
+ h_source, h_dest, ntohs(l2->etype),
+ l2->vlan1, l2->vlan2, entry->ib1, ib2,
+ acct ? acct->packets : 0, acct ? acct->bytes : 0);
+ }
+
+ return 0;
+}
+
+static int
+mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private)
+{
+ return mtk_ppe_debugfs_foe_show(m, private, false);
+}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all);
+
+static int
+mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private)
+{
+ return mtk_ppe_debugfs_foe_show(m, private, true);
+}
+DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind);
+
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
+{
+ struct dentry *root;
+
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+ root = debugfs_create_dir(ppe->dirname, NULL);
+ debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops);
+ debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
new file mode 100644
index 0000000000..a4efbeb162
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -0,0 +1,635 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
+ */
+
+#include <linux/if_ether.h>
+#include <linux/rhashtable.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include <net/dsa.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed.h"
+
+struct mtk_flow_data {
+ struct ethhdr eth;
+
+ union {
+ struct {
+ __be32 src_addr;
+ __be32 dst_addr;
+ } v4;
+
+ struct {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ } v6;
+ };
+
+ __be16 src_port;
+ __be16 dst_port;
+
+ u16 vlan_in;
+
+ struct {
+ u16 id;
+ __be16 proto;
+ u8 num;
+ } vlan;
+ struct {
+ u16 sid;
+ u8 num;
+ } pppoe;
+};
+
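+/* Offloaded flows are tracked in eth->flow_table, keyed on the flow_cls
+ * cookie so that later DESTROY and STATS requests can find the entry again.
+ */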
+static const struct rhashtable_params mtk_flow_ht_params = {
+ .head_offset = offsetof(struct mtk_flow_entry, node),
+ .key_offset = offsetof(struct mtk_flow_entry, cookie),
+ .key_len = sizeof(unsigned long),
+ .automatic_shrinking = true,
+};
+
+static int
+mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data, bool egress)
+{
+ return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
+ data->v4.src_addr, data->src_port,
+ data->v4.dst_addr, data->dst_port);
+}
+
+static int
+mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct mtk_flow_data *data)
+{
+ return mtk_foe_entry_set_ipv6_tuple(eth, foe,
+ data->v6.src_addr.s6_addr32, data->src_port,
+ data->v6.dst_addr.s6_addr32, data->dst_port);
+}
+
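+/* Apply an FLOW_ACT_MANGLE_HDR_TYPE_ETH pedit to the cached Ethernet header.
+ * Only the MAC address bytes can be rewritten, so offsets beyond 8 are
+ * ignored; depending on the mask either a 2-byte or a 4-byte chunk of the
+ * mangle value is copied into place.
+ */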
+static void
+mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
+{
+ void *dest = eth + act->mangle.offset;
+ const void *src = &act->mangle.val;
+
+ if (act->mangle.offset > 8)
+ return;
+
+ if (act->mangle.mask == 0xffff) {
+ src += 2;
+ dest += 2;
+ }
+
+ memcpy(dest, src, act->mangle.mask ? 2 : 4);
+}
+
+static int
+mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
+{
+ struct net_device_path_stack stack;
+ struct net_device_path *path;
+ int err;
+
+ if (!dev)
+ return -ENODEV;
+
+ if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
+ return -1;
+
+ err = dev_fill_forward_path(dev, addr, &stack);
+ if (err)
+ return err;
+
+ path = &stack.path[stack.num_paths - 1];
+ if (path->type != DEV_PATH_MTK_WDMA)
+ return -1;
+
+ info->wdma_idx = path->mtk_wdma.wdma_idx;
+ info->queue = path->mtk_wdma.queue;
+ info->bss = path->mtk_wdma.bss;
+ info->wcid = path->mtk_wdma.wcid;
+
+ return 0;
+}
+
+
+static int
+mtk_flow_mangle_ports(const struct flow_action_entry *act,
+ struct mtk_flow_data *data)
+{
+ u32 val = ntohl(act->mangle.val);
+
+ switch (act->mangle.offset) {
+ case 0:
+ if (act->mangle.mask == ~htonl(0xffff))
+ data->dst_port = cpu_to_be16(val);
+ else
+ data->src_port = cpu_to_be16(val >> 16);
+ break;
+ case 2:
+ data->dst_port = cpu_to_be16(val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
+ struct mtk_flow_data *data)
+{
+ __be32 *dest;
+
+ switch (act->mangle.offset) {
+ case offsetof(struct iphdr, saddr):
+ dest = &data->v4.src_addr;
+ break;
+ case offsetof(struct iphdr, daddr):
+ dest = &data->v4.dst_addr;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ memcpy(dest, &act->mangle.val, sizeof(u32));
+
+ return 0;
+}
+
+static int
+mtk_flow_get_dsa_port(struct net_device **dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ struct dsa_port *dp;
+
+ dp = dsa_port_from_netdev(*dev);
+ if (IS_ERR(dp))
+ return -ENODEV;
+
+ if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
+ return -ENODEV;
+
+ *dev = dsa_port_to_master(dp);
+
+ return dp->index;
+#else
+ return -ENODEV;
+#endif
+}
+
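+/* Resolve the egress path for a flow. If the destination resolves to a
+ * WED/WDMA device, the entry is pointed at the wireless PSE port (port 3 on
+ * NETSYS v1, port 8 or 9 on v2 and later, depending on the WDMA index).
+ * Otherwise the flow egresses through one of the GDM ports, with the
+ * hardware TX queue taken from the DSA port (queue 3 + port) when the target
+ * sits behind an MTK DSA switch, or derived from the PSE port otherwise.
+ */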
+static int
+mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
+ struct net_device *dev, const u8 *dest_mac,
+ int *wed_index)
+{
+ struct mtk_wdma_info info = {};
+ int pse_port, dsa_port, queue;
+
+ if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
+ mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
+ info.bss, info.wcid);
+ if (mtk_is_netsys_v2_or_greater(eth)) {
+ switch (info.wdma_idx) {
+ case 0:
+ pse_port = 8;
+ break;
+ case 1:
+ pse_port = 9;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pse_port = 3;
+ }
+ *wed_index = info.wdma_idx;
+ goto out;
+ }
+
+ dsa_port = mtk_flow_get_dsa_port(&dev);
+
+ if (dev == eth->netdev[0])
+ pse_port = PSE_GDM1_PORT;
+ else if (dev == eth->netdev[1])
+ pse_port = PSE_GDM2_PORT;
+ else if (dev == eth->netdev[2])
+ pse_port = PSE_GDM3_PORT;
+ else
+ return -EOPNOTSUPP;
+
+ if (dsa_port >= 0) {
+ mtk_foe_entry_set_dsa(eth, foe, dsa_port);
+ queue = 3 + dsa_port;
+ } else {
+ queue = pse_port - 1;
+ }
+ mtk_foe_entry_set_queue(eth, foe, queue);
+
+out:
+ mtk_foe_entry_set_pse_port(eth, foe, pse_port);
+
+ return 0;
+}
+
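+/* Translate a flower rule into a FOE entry. The dissector keys select the
+ * entry type (L2 bridge, IPv4 HNAPT or IPv6 5-tuple); the actions are walked
+ * twice: first to collect Ethernet mangles, the redirect target and any
+ * VLAN/PPPoE pushes, then again after the original tuple has been set, to
+ * apply port and IPv4 address mangles as the translated (post-NAT) tuple.
+ */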
+static int
+mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
+ int ppe_index)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(f);
+ struct flow_action_entry *act;
+ struct mtk_flow_data data = {};
+ struct mtk_foe_entry foe;
+ struct net_device *odev = NULL;
+ struct mtk_flow_entry *entry;
+ int offload_type = 0;
+ int wed_index = -1;
+ u16 addr_type = 0;
+ u8 l4proto = 0;
+ int err = 0;
+ int i;
+
+ if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
+ return -EEXIST;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
+ struct flow_match_meta match;
+
+ flow_rule_match_meta(rule, &match);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
+ struct flow_match_control match;
+
+ flow_rule_match_control(rule, &match);
+ addr_type = match.key->addr_type;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
+ struct flow_match_basic match;
+
+ flow_rule_match_basic(rule, &match);
+ l4proto = match.key->ip_proto;
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ switch (addr_type) {
+ case 0:
+ offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ struct flow_match_eth_addrs match;
+
+ flow_rule_match_eth_addrs(rule, &match);
+ memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
+ memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
+ } else {
+ return -EOPNOTSUPP;
+ }
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
+ struct flow_match_vlan match;
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan_in = match.key->vlan_id;
+ }
+ break;
+ case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
+ break;
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
+ offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_MANGLE:
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+ if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
+ mtk_flow_offload_mangle_eth(act, &data.eth);
+ break;
+ case FLOW_ACTION_REDIRECT:
+ odev = act->dev;
+ break;
+ case FLOW_ACTION_CSUM:
+ break;
+ case FLOW_ACTION_VLAN_PUSH:
+ if (data.vlan.num == 1 ||
+ act->vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ data.vlan.id = act->vlan.vid;
+ data.vlan.proto = act->vlan.proto;
+ data.vlan.num++;
+ break;
+ case FLOW_ACTION_VLAN_POP:
+ break;
+ case FLOW_ACTION_PPPOE_PUSH:
+ if (data.pppoe.num == 1)
+ return -EOPNOTSUPP;
+
+ data.pppoe.sid = act->pppoe.sid;
+ data.pppoe.num++;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+
+ if (!is_valid_ether_addr(data.eth.h_source) ||
+ !is_valid_ether_addr(data.eth.h_dest))
+ return -EINVAL;
+
+ err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
+ data.eth.h_source, data.eth.h_dest);
+ if (err)
+ return err;
+
+ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
+ struct flow_match_ports ports;
+
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ flow_rule_match_ports(rule, &ports);
+ data.src_port = ports.key->src;
+ data.dst_port = ports.key->dst;
+ } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
+ return -EOPNOTSUPP;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ struct flow_match_ipv4_addrs addrs;
+
+ flow_rule_match_ipv4_addrs(rule, &addrs);
+
+ data.v4.src_addr = addrs.key->src;
+ data.v4.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_match_ipv6_addrs addrs;
+
+ flow_rule_match_ipv6_addrs(rule, &addrs);
+
+ data.v6.src_addr = addrs.key->src;
+ data.v6.dst_addr = addrs.key->dst;
+
+ mtk_flow_set_ipv6_addr(eth, &foe, &data);
+ }
+
+ flow_action_for_each(i, act, &rule->action) {
+ if (act->id != FLOW_ACTION_MANGLE)
+ continue;
+
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ return -EOPNOTSUPP;
+
+ switch (act->mangle.htype) {
+ case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
+ case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
+ err = mtk_flow_mangle_ports(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
+ err = mtk_flow_mangle_ipv4(act, &data);
+ break;
+ case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
+ /* handled earlier */
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (err)
+ return err;
+ }
+
+ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
+ err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
+ if (err)
+ return err;
+ }
+
+ if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
+ foe.bridge.vlan = data.vlan_in;
+
+ if (data.vlan.num == 1) {
+ if (data.vlan.proto != htons(ETH_P_8021Q))
+ return -EOPNOTSUPP;
+
+ mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
+ }
+ if (data.pppoe.num == 1)
+ mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);
+
+ err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
+ &wed_index);
+ if (err)
+ return err;
+
+ if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
+ return err;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->cookie = f->cookie;
+ memcpy(&entry->data, &foe, sizeof(entry->data));
+ entry->wed_index = wed_index;
+ entry->ppe_index = ppe_index;
+
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
+ if (err < 0)
+ goto free;
+
+ err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
+ mtk_flow_ht_params);
+ if (err < 0)
+ goto clear;
+
+ return 0;
+
+clear:
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
+free:
+ kfree(entry);
+ if (wed_index >= 0)
+ mtk_wed_flow_remove(wed_index);
+ return err;
+}
+
+static int
+mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+ struct mtk_flow_entry *entry;
+
+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
+ rhashtable_remove_fast(&eth->flow_table, &entry->node,
+ mtk_flow_ht_params);
+ if (entry->wed_index >= 0)
+ mtk_wed_flow_remove(entry->wed_index);
+ kfree(entry);
+
+ return 0;
+}
+
+static int
+mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
+{
+ struct mtk_flow_entry *entry;
+ struct mtk_foe_accounting diff;
+ u32 idle;
+
+ entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
+ mtk_flow_ht_params);
+ if (!entry)
+ return -ENOENT;
+
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
+ f->stats.lastused = jiffies - idle * HZ;
+
+ if (entry->hash != 0xFFFF &&
+ mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
+ &diff)) {
+ f->stats.pkts += diff.packets;
+ f->stats.bytes += diff.bytes;
+ }
+
+ return 0;
+}
+
+static DEFINE_MUTEX(mtk_flow_offload_mutex);
+
+int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
+ int ppe_index)
+{
+ int err;
+
+ mutex_lock(&mtk_flow_offload_mutex);
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+ err = mtk_flow_offload_replace(eth, cls, ppe_index);
+ break;
+ case FLOW_CLS_DESTROY:
+ err = mtk_flow_offload_destroy(eth, cls);
+ break;
+ case FLOW_CLS_STATS:
+ err = mtk_flow_offload_stats(eth, cls);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+ mutex_unlock(&mtk_flow_offload_mutex);
+
+ return err;
+}
+
+static int
+mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct flow_cls_offload *cls = type_data;
+ struct net_device *dev = cb_priv;
+ struct mtk_mac *mac;
+ struct mtk_eth *eth;
+
+ mac = netdev_priv(dev);
+ eth = mac->hw;
+
+ if (!tc_can_offload(dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(eth, cls, 0);
+}
+
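+/* Bind/unbind the flower offload callback for a port. A single callback is
+ * shared per net_device and reference counted, so repeated BIND requests
+ * (both the clsact block and the TC_SETUP_FT path funnel through here) reuse
+ * the same flow_block_cb.
+ */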
+static int
+mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
+{
+ struct mtk_mac *mac = netdev_priv(dev);
+ struct mtk_eth *eth = mac->hw;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+ flow_setup_cb_t *cb;
+
+ if (!eth->soc->offload_version)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ cb = mtk_eth_setup_tc_block_cb;
+ f->driver_block_list = &block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+ block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
+ if (IS_ERR(block_cb))
+ return PTR_ERR(block_cb);
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return mtk_eth_setup_tc_block(dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+int mtk_eth_offload_init(struct mtk_eth *eth)
+{
+ return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
new file mode 100644
index 0000000000..3ce088eef0
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_PPE_REGS_H
+#define __MTK_PPE_REGS_H
+
+#define MTK_PPE_GLO_CFG 0x200
+#define MTK_PPE_GLO_CFG_EN BIT(0)
+#define MTK_PPE_GLO_CFG_TSID_EN BIT(1)
+#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2)
+#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3)
+#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4)
+#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5)
+#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6)
+#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7)
+#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8)
+#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9)
+#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10)
+#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11)
+#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GENMASK(13, 12)
+#define MTK_PPE_GLO_CFG_BUSY BIT(31)
+
+#define MTK_PPE_FLOW_CFG 0x204
+#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1)
+#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2)
+#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3)
+#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6)
+#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7)
+#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8)
+#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9)
+#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10)
+#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12)
+#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13)
+#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14)
+#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15)
+#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16)
+#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17)
+#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18)
+#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19)
+#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20)
+
+#define MTK_PPE_IP_PROTO_CHK 0x208
+#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0)
+#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16)
+
+#define MTK_PPE_TB_CFG 0x21c
+#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0)
+#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3)
+#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4)
+#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6)
+#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7)
+#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8)
+#define MTK_PPE_TB_CFG_AGE_TCP BIT(9)
+#define MTK_PPE_TB_CFG_AGE_UDP BIT(10)
+#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11)
+#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12)
+#define MTK_PPE_TB_CFG_HASH_MODE GENMASK(15, 14)
+#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16)
+#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18)
+#define MTK_PPE_TB_CFG_INFO_SEL BIT(20)
+#define MTK_PPE_TB_TICK_SEL BIT(24)
+
+#define MTK_PPE_BIND_LMT1 0x230
+#define MTK_PPE_NTU_KEEPALIVE GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234
+
+enum {
+ MTK_PPE_SCAN_MODE_DISABLED,
+ MTK_PPE_SCAN_MODE_CHECK_AGE,
+ MTK_PPE_SCAN_MODE_KEEPALIVE_AGE,
+};
+
+enum {
+ MTK_PPE_KEEPALIVE_DISABLE,
+ MTK_PPE_KEEPALIVE_UNICAST_CPU,
+ MTK_PPE_KEEPALIVE_DUP_CPU = 3,
+};
+
+enum {
+ MTK_PPE_SEARCH_MISS_ACTION_DROP,
+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2,
+ MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3,
+};
+
+#define MTK_PPE_TB_BASE 0x220
+
+#define MTK_PPE_TB_USED 0x224
+#define MTK_PPE_TB_USED_NUM GENMASK(13, 0)
+
+#define MTK_PPE_BIND_RATE 0x228
+#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0)
+#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16)
+
+#define MTK_PPE_BIND_LIMIT0 0x22c
+#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0)
+#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16)
+
+#define MTK_PPE_BIND_LIMIT1 0x230
+#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0)
+#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16)
+
+#define MTK_PPE_KEEPALIVE 0x234
+#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0)
+#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16)
+#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24)
+
+#define MTK_PPE_UNBIND_AGE 0x238
+#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16)
+#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0)
+
+#define MTK_PPE_BIND_AGE0 0x23c
+#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16)
+#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0)
+
+#define MTK_PPE_BIND_AGE1 0x240
+#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16)
+#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0)
+
+#define MTK_PPE_HASH_SEED 0x244
+
+#define MTK_PPE_DEFAULT_CPU_PORT 0x248
+#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4))
+
+#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c
+
+#define MTK_PPE_MTU_DROP 0x308
+
+#define MTK_PPE_VLAN_MTU0 0x30c
+#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0)
+#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16)
+
+#define MTK_PPE_VLAN_MTU1 0x310
+#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0)
+#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16)
+
+#define MTK_PPE_VPM_TPID 0x318
+
+#define MTK_PPE_CACHE_CTL 0x320
+#define MTK_PPE_CACHE_CTL_EN BIT(0)
+#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4)
+#define MTK_PPE_CACHE_CTL_REQ BIT(8)
+#define MTK_PPE_CACHE_CTL_CLEAR BIT(9)
+#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12)
+
+#define MTK_PPE_MIB_CFG 0x334
+#define MTK_PPE_MIB_CFG_EN BIT(0)
+#define MTK_PPE_MIB_CFG_RD_CLR BIT(1)
+
+#define MTK_PPE_MIB_TB_BASE 0x338
+
+#define MTK_PPE_MIB_SER_CR 0x33C
+#define MTK_PPE_MIB_SER_CR_ST BIT(16)
+#define MTK_PPE_MIB_SER_CR_ADDR GENMASK(13, 0)
+
+#define MTK_PPE_MIB_SER_R0 0x340
+#define MTK_PPE_MIB_SER_R0_BYTE_CNT_LOW GENMASK(31, 0)
+
+#define MTK_PPE_MIB_SER_R1 0x344
+#define MTK_PPE_MIB_SER_R1_PKT_CNT_LOW GENMASK(31, 16)
+#define MTK_PPE_MIB_SER_R1_BYTE_CNT_HIGH GENMASK(15, 0)
+
+#define MTK_PPE_MIB_SER_R2 0x348
+#define MTK_PPE_MIB_SER_R2_PKT_CNT_HIGH GENMASK(23, 0)
+
+#define MTK_PPE_MIB_SER_R3 0x34c
+
+#define MTK_PPE_MIB_CACHE_CTL 0x350
+#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0)
+#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2)
+
+#define MTK_PPE_SBW_CTRL 0x374
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c
new file mode 100644
index 0000000000..31aebeb2e2
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c
@@ -0,0 +1,1754 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 MediaTek Corporation
+ * Copyright (c) 2020 BayLibre SAS
+ *
+ * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+ */
+
+#include <linux/bits.h>
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/dma-mapping.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+#define MTK_STAR_DRVNAME "mtk_star_emac"
+
+#define MTK_STAR_WAIT_TIMEOUT 300
+#define MTK_STAR_MAX_FRAME_SIZE 1514
+#define MTK_STAR_SKB_ALIGNMENT 16
+#define MTK_STAR_HASHTABLE_MC_LIMIT 256
+#define MTK_STAR_HASHTABLE_SIZE_MAX 512
+#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4)
+
+/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't
+ * work for this controller.
+ */
+#define MTK_STAR_IP_ALIGN 2
+
+static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" };
+#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names)
+
+/* PHY Control Register 0 */
+#define MTK_STAR_REG_PHY_CTRL0 0x0000
+#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13)
+#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14)
+#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15)
+#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8)
+#define MTK_STAR_OFF_PHY_CTRL0_PREG 8
+#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16)
+#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16
+
+/* PHY Control Register 1 */
+#define MTK_STAR_REG_PHY_CTRL1 0x0004
+#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0)
+#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8)
+#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01
+#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11)
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12)
+#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13)
+
+/* MAC Configuration Register */
+#define MTK_STAR_REG_MAC_CFG 0x0008
+#define MTK_STAR_OFF_MAC_CFG_IPG 10
+#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0)
+#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16)
+#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19)
+#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20)
+#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22)
+#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31)
+
+/* Flow-Control Configuration Register */
+#define MTK_STAR_REG_FC_CFG 0x000c
+#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7)
+#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8)
+#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16
+#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16)
+#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800
+
+/* ARL Configuration Register */
+#define MTK_STAR_REG_ARL_CFG 0x0010
+#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0)
+#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4)
+
+/* MAC High and Low Bytes Registers */
+#define MTK_STAR_REG_MY_MAC_H 0x0014
+#define MTK_STAR_REG_MY_MAC_L 0x0018
+
+/* Hash Table Control Register */
+#define MTK_STAR_REG_HASH_CTRL 0x001c
+#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0)
+#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12)
+#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13)
+#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17)
+#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31)
+
+/* TX DMA Control Register */
+#define MTK_STAR_REG_TX_DMA_CTRL 0x0034
+#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0)
+#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1)
+#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2)
+
+/* RX DMA Control Register */
+#define MTK_STAR_REG_RX_DMA_CTRL 0x0038
+#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0)
+#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1)
+#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2)
+
+/* DMA Address Registers */
+#define MTK_STAR_REG_TX_DPTR 0x003c
+#define MTK_STAR_REG_RX_DPTR 0x0040
+#define MTK_STAR_REG_TX_BASE_ADDR 0x0044
+#define MTK_STAR_REG_RX_BASE_ADDR 0x0048
+
+/* Interrupt Status Register */
+#define MTK_STAR_REG_INT_STS 0x0050
+#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2)
+#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3)
+#define MTK_STAR_BIT_INT_STS_FNRC BIT(6)
+#define MTK_STAR_BIT_INT_STS_TNTC BIT(8)
+
+/* Interrupt Mask Register */
+#define MTK_STAR_REG_INT_MASK 0x0054
+#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6)
+
+/* Delay-Macro Register */
+#define MTK_STAR_REG_TEST0 0x0058
+#define MTK_STAR_BIT_INV_RX_CLK BIT(30)
+#define MTK_STAR_BIT_INV_TX_CLK BIT(31)
+
+/* Misc. Config Register */
+#define MTK_STAR_REG_TEST1 0x005c
+#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31)
+
+/* Extended Configuration Register */
+#define MTK_STAR_REG_EXT_CFG 0x0060
+#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16
+#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16)
+#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400
+
+/* EthSys Configuration Register */
+#define MTK_STAR_REG_SYS_CONF 0x0094
+#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0)
+#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1)
+#define MTK_STAR_BIT_SWC_MII_MODE BIT(2)
+
+/* MAC Clock Configuration Register */
+#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac
+#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0)
+#define MTK_STAR_BIT_CLK_DIV_10 0x0a
+#define MTK_STAR_BIT_CLK_DIV_50 0x32
+
+/* Counter registers. */
+#define MTK_STAR_REG_C_RXOKPKT 0x0100
+#define MTK_STAR_REG_C_RXOKBYTE 0x0104
+#define MTK_STAR_REG_C_RXRUNT 0x0108
+#define MTK_STAR_REG_C_RXLONG 0x010c
+#define MTK_STAR_REG_C_RXDROP 0x0110
+#define MTK_STAR_REG_C_RXCRC 0x0114
+#define MTK_STAR_REG_C_RXARLDROP 0x0118
+#define MTK_STAR_REG_C_RXVLANDROP 0x011c
+#define MTK_STAR_REG_C_RXCSERR 0x0120
+#define MTK_STAR_REG_C_RXPAUSE 0x0124
+#define MTK_STAR_REG_C_TXOKPKT 0x0128
+#define MTK_STAR_REG_C_TXOKBYTE 0x012c
+#define MTK_STAR_REG_C_TXPAUSECOL 0x0130
+#define MTK_STAR_REG_C_TXRTY 0x0134
+#define MTK_STAR_REG_C_TXSKIP 0x0138
+#define MTK_STAR_REG_C_TX_ARP 0x013c
+#define MTK_STAR_REG_C_RX_RERR 0x01d8
+#define MTK_STAR_REG_C_RX_UNI 0x01dc
+#define MTK_STAR_REG_C_RX_MULTI 0x01e0
+#define MTK_STAR_REG_C_RX_BROAD 0x01e4
+#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8
+#define MTK_STAR_REG_C_TX_UNI 0x01ec
+#define MTK_STAR_REG_C_TX_MULTI 0x01f0
+#define MTK_STAR_REG_C_TX_BROAD 0x01f4
+#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8
+#define MTK_STAR_REG_C_TX_LATECOL 0x01fc
+#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214
+#define MTK_STAR_REG_C_RX_TWIST 0x0218
+
+/* Ethernet CFG Control */
+#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4
+#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8
+#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10
+#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0
+#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0)
+#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8)
+
+/* Represents the actual structure of descriptors used by the MAC. We can
+ * reuse the same structure for both TX and RX - the layout is the same, only
+ * the flags differ slightly.
+ */
+struct mtk_star_ring_desc {
+ /* Contains both the status flags as well as packet length. */
+ u32 status;
+ u32 data_ptr;
+ u32 vtag;
+ u32 reserved;
+};
+
+#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0)
+#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24)
+#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25)
+#define MTK_STAR_DESC_BIT_INT BIT(27)
+#define MTK_STAR_DESC_BIT_LS BIT(28)
+#define MTK_STAR_DESC_BIT_FS BIT(29)
+#define MTK_STAR_DESC_BIT_EOR BIT(30)
+#define MTK_STAR_DESC_BIT_COWN BIT(31)
+
+/* Helper structure for storing data read from/written to descriptors in order
+ * to limit reads from/writes to DMA memory.
+ */
+struct mtk_star_ring_desc_data {
+ unsigned int len;
+ unsigned int flags;
+ dma_addr_t dma_addr;
+ struct sk_buff *skb;
+};
+
+#define MTK_STAR_RING_NUM_DESCS 512
+#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4)
+#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS
+#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS
+#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2)
+#define MTK_STAR_DMA_SIZE \
+ (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc))
+
+struct mtk_star_ring {
+ struct mtk_star_ring_desc *descs;
+ struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS];
+ dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS];
+ unsigned int head;
+ unsigned int tail;
+};
+
+struct mtk_star_compat {
+ int (*set_interface_mode)(struct net_device *ndev);
+ unsigned char bit_clk_div;
+};
+
+struct mtk_star_priv {
+ struct net_device *ndev;
+
+ struct regmap *regs;
+ struct regmap *pericfg;
+
+ struct clk_bulk_data clks[MTK_STAR_NCLKS];
+
+ void *ring_base;
+ struct mtk_star_ring_desc *descs_base;
+ dma_addr_t dma_addr;
+ struct mtk_star_ring tx_ring;
+ struct mtk_star_ring rx_ring;
+
+ struct mii_bus *mii;
+ struct napi_struct tx_napi;
+ struct napi_struct rx_napi;
+
+ struct device_node *phy_node;
+ phy_interface_t phy_intf;
+ struct phy_device *phydev;
+ unsigned int link;
+ int speed;
+ int duplex;
+ int pause;
+ bool rmii_rxc;
+ bool rx_inv;
+ bool tx_inv;
+
+ const struct mtk_star_compat *compat_data;
+
+ /* Protects against concurrent descriptor access. */
+ spinlock_t lock;
+
+ struct rtnl_link_stats64 stats;
+};
+
+static struct device *mtk_star_get_dev(struct mtk_star_priv *priv)
+{
+ return priv->ndev->dev.parent;
+}
+
+static const struct regmap_config mtk_star_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .disable_locking = true,
+};
+
+static void mtk_star_ring_init(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc *descs)
+{
+ memset(ring, 0, sizeof(*ring));
+ ring->descs = descs;
+ ring->head = 0;
+ ring->tail = 0;
+}
+
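+/* Descriptor ownership is tracked with the COWN bit: COWN set means the
+ * descriptor is owned by the CPU. pop_tail() therefore returns -1 while the
+ * tail descriptor is still owned by the DMA engine, and push_head() clears
+ * COWN last, behind a dma_wmb(), so a fully written descriptor is handed
+ * back to the hardware.
+ */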
+static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct mtk_star_ring_desc *desc = &ring->descs[ring->tail];
+ unsigned int status;
+
+ status = READ_ONCE(desc->status);
+ dma_rmb(); /* Make sure we read the status bits before checking it. */
+
+ if (!(status & MTK_STAR_DESC_BIT_COWN))
+ return -1;
+
+ desc_data->len = status & MTK_STAR_DESC_MSK_LEN;
+ desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN;
+ desc_data->dma_addr = ring->dma_addrs[ring->tail];
+ desc_data->skb = ring->skbs[ring->tail];
+
+ ring->dma_addrs[ring->tail] = 0;
+ ring->skbs[ring->tail] = NULL;
+
+ status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR;
+
+ WRITE_ONCE(desc->data_ptr, 0);
+ WRITE_ONCE(desc->status, status);
+
+ ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS;
+
+ return 0;
+}
+
+static void mtk_star_ring_push_head(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data,
+ unsigned int flags)
+{
+ struct mtk_star_ring_desc *desc = &ring->descs[ring->head];
+ unsigned int status;
+
+ status = READ_ONCE(desc->status);
+
+ ring->skbs[ring->head] = desc_data->skb;
+ ring->dma_addrs[ring->head] = desc_data->dma_addr;
+
+ status |= desc_data->len;
+ if (flags)
+ status |= flags;
+
+ WRITE_ONCE(desc->data_ptr, desc_data->dma_addr);
+ WRITE_ONCE(desc->status, status);
+ status &= ~MTK_STAR_DESC_BIT_COWN;
+ /* Flush previous modifications before ownership change. */
+ dma_wmb();
+ WRITE_ONCE(desc->status, status);
+
+ ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS;
+}
+
+static void
+mtk_star_ring_push_head_rx(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ mtk_star_ring_push_head(ring, desc_data, 0);
+}
+
+static void
+mtk_star_ring_push_head_tx(struct mtk_star_ring *ring,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ static const unsigned int flags = MTK_STAR_DESC_BIT_FS |
+ MTK_STAR_DESC_BIT_LS |
+ MTK_STAR_DESC_BIT_INT;
+
+ mtk_star_ring_push_head(ring, desc_data, flags);
+}
+
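+/* Number of free TX descriptors. One slot is always kept unused so that
+ * head == tail unambiguously means "ring empty" rather than "ring full".
+ */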
+static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring)
+{
+ u32 avail;
+
+ if (ring->tail > ring->head)
+ avail = ring->tail - ring->head - 1;
+ else
+ avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1;
+
+ return avail;
+}
+
+static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */
+ return dma_map_single(dev, skb_tail_pointer(skb) - 2,
+ skb_tailroom(skb), DMA_FROM_DEVICE);
+}
+
+static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ dma_unmap_single(dev, desc_data->dma_addr,
+ skb_tailroom(desc_data->skb), DMA_FROM_DEVICE);
+}
+
+static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv,
+ struct sk_buff *skb)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+}
+
+static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv,
+ struct mtk_star_ring_desc_data *desc_data)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+
+ dma_unmap_single(dev, desc_data->dma_addr,
+ skb_headlen(desc_data->skb), DMA_TO_DEVICE);
+}
+
+static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv)
+{
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
+ MTK_STAR_BIT_MAC_CFG_NIC_PD);
+}
+
+static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value &= ~MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value &= ~MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv,
+ bool rx, bool tx)
+{
+ u32 value;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value);
+
+ if (tx)
+ value |= MTK_STAR_BIT_INT_STS_TNTC;
+ if (rx)
+ value |= MTK_STAR_BIT_INT_STS_FNRC;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value);
+}
+
+/* Unmask the three interrupts we care about, mask all others. */
+static void mtk_star_intr_enable(struct mtk_star_priv *priv)
+{
+ unsigned int val = MTK_STAR_BIT_INT_STS_TNTC |
+ MTK_STAR_BIT_INT_STS_FNRC |
+ MTK_STAR_REG_INT_STS_MIB_CNT_TH;
+
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val);
+}
+
+static void mtk_star_intr_disable(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0);
+}
+
+static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val);
+ regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val);
+
+ return val;
+}
+
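+/* Lay out the descriptor area: the first MTK_STAR_NUM_TX_DESCS descriptors
+ * form the TX ring and the remainder the RX ring. All descriptors start out
+ * CPU-owned (COWN set) with EOR marking the last descriptor of each ring,
+ * and the base/current pointer registers are programmed from priv->dma_addr.
+ */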
+static void mtk_star_dma_init(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring_desc *desc;
+ unsigned int val;
+ int i;
+
+ priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base;
+
+ for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) {
+ desc = &priv->descs_base[i];
+
+ memset(desc, 0, sizeof(*desc));
+ desc->status = MTK_STAR_DESC_BIT_COWN;
+ if ((i == MTK_STAR_NUM_TX_DESCS - 1) ||
+ (i == MTK_STAR_NUM_DESCS_TOTAL - 1))
+ desc->status |= MTK_STAR_DESC_BIT_EOR;
+ }
+
+ mtk_star_ring_init(&priv->tx_ring, priv->descs_base);
+ mtk_star_ring_init(&priv->rx_ring,
+ priv->descs_base + MTK_STAR_NUM_TX_DESCS);
+
+ /* Set DMA pointers. */
+ val = (unsigned int)priv->dma_addr;
+ regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val);
+ regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val);
+
+ val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS;
+ regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val);
+ regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val);
+}
+
+static void mtk_star_dma_start(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_START);
+ regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_START);
+}
+
+static void mtk_star_dma_stop(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_STOP);
+ regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_STOP);
+}
+
+static void mtk_star_dma_disable(struct mtk_star_priv *priv)
+{
+ int i;
+
+ mtk_star_dma_stop(priv);
+
+ /* Take back all descriptors. */
+ for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++)
+ priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN;
+}
+
+static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL,
+ MTK_STAR_BIT_RX_DMA_CTRL_RESUME);
+}
+
+static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv)
+{
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL,
+ MTK_STAR_BIT_TX_DMA_CTRL_RESUME);
+}
+
+static void mtk_star_set_mac_addr(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ const u8 *mac_addr = ndev->dev_addr;
+ unsigned int high, low;
+
+ high = mac_addr[0] << 8 | mac_addr[1] << 0;
+ low = mac_addr[2] << 24 | mac_addr[3] << 16 |
+ mac_addr[4] << 8 | mac_addr[5];
+
+ regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high);
+ regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low);
+}
+
+static void mtk_star_reset_counters(struct mtk_star_priv *priv)
+{
+ static const unsigned int counter_regs[] = {
+ MTK_STAR_REG_C_RXOKPKT,
+ MTK_STAR_REG_C_RXOKBYTE,
+ MTK_STAR_REG_C_RXRUNT,
+ MTK_STAR_REG_C_RXLONG,
+ MTK_STAR_REG_C_RXDROP,
+ MTK_STAR_REG_C_RXCRC,
+ MTK_STAR_REG_C_RXARLDROP,
+ MTK_STAR_REG_C_RXVLANDROP,
+ MTK_STAR_REG_C_RXCSERR,
+ MTK_STAR_REG_C_RXPAUSE,
+ MTK_STAR_REG_C_TXOKPKT,
+ MTK_STAR_REG_C_TXOKBYTE,
+ MTK_STAR_REG_C_TXPAUSECOL,
+ MTK_STAR_REG_C_TXRTY,
+ MTK_STAR_REG_C_TXSKIP,
+ MTK_STAR_REG_C_TX_ARP,
+ MTK_STAR_REG_C_RX_RERR,
+ MTK_STAR_REG_C_RX_UNI,
+ MTK_STAR_REG_C_RX_MULTI,
+ MTK_STAR_REG_C_RX_BROAD,
+ MTK_STAR_REG_C_RX_ALIGNERR,
+ MTK_STAR_REG_C_TX_UNI,
+ MTK_STAR_REG_C_TX_MULTI,
+ MTK_STAR_REG_C_TX_BROAD,
+ MTK_STAR_REG_C_TX_TIMEOUT,
+ MTK_STAR_REG_C_TX_LATECOL,
+ MTK_STAR_REG_C_RX_LENGTHERR,
+ MTK_STAR_REG_C_RX_TWIST,
+ };
+
+ unsigned int i, val;
+
+ for (i = 0; i < ARRAY_SIZE(counter_regs); i++)
+ regmap_read(priv->regs, counter_regs[i], &val);
+}
+
+static void mtk_star_update_stat(struct mtk_star_priv *priv,
+ unsigned int reg, u64 *stat)
+{
+ unsigned int val;
+
+ regmap_read(priv->regs, reg, &val);
+ *stat += val;
+}
+
+/* Try to get as many stats as possible from the internal registers instead
+ * of tracking them ourselves.
+ */
+static void mtk_star_update_stats(struct mtk_star_priv *priv)
+{
+ struct rtnl_link_stats64 *stats = &priv->stats;
+
+ /* OK packets and bytes. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes);
+
+ /* RX & TX multicast. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast);
+
+ /* Collisions. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL,
+ &stats->collisions);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL,
+ &stats->collisions);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions);
+
+ /* RX Errors. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR,
+ &stats->rx_length_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG,
+ &stats->rx_over_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR,
+ &stats->rx_frame_errors);
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP,
+ &stats->rx_fifo_errors);
+ /* Sum of the general RX error counter + all of the above. */
+ mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors);
+ stats->rx_errors += stats->rx_length_errors;
+ stats->rx_errors += stats->rx_over_errors;
+ stats->rx_errors += stats->rx_crc_errors;
+ stats->rx_errors += stats->rx_frame_errors;
+ stats->rx_errors += stats->rx_fifo_errors;
+}
+
+static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev)
+{
+ uintptr_t tail, offset;
+ struct sk_buff *skb;
+
+ skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE);
+ if (!skb)
+ return NULL;
+
+ /* Align to 16 bytes. */
+ tail = (uintptr_t)skb_tail_pointer(skb);
+ if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) {
+ offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1);
+ skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset);
+ }
+
+ /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will
+ * extract the Ethernet header (14 bytes) so we need two more bytes.
+ */
+ skb_reserve(skb, MTK_STAR_IP_ALIGN);
+
+ return skb;
+}
+
+static int mtk_star_prepare_rx_skbs(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc *desc;
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+ int i;
+
+ for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) {
+ skb = mtk_star_alloc_skb(ndev);
+ if (!skb)
+ return -ENOMEM;
+
+ dma_addr = mtk_star_dma_map_rx(priv, skb);
+ if (dma_mapping_error(dev, dma_addr)) {
+ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ desc = &ring->descs[i];
+ desc->data_ptr = dma_addr;
+ desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN;
+ desc->status &= ~MTK_STAR_DESC_BIT_COWN;
+ ring->skbs[i] = skb;
+ ring->dma_addrs[i] = dma_addr;
+ }
+
+ return 0;
+}
+
+static void
+mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring,
+ void (*unmap_func)(struct mtk_star_priv *,
+ struct mtk_star_ring_desc_data *))
+{
+ struct mtk_star_ring_desc_data desc_data;
+ int i;
+
+ for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) {
+ if (!ring->dma_addrs[i])
+ continue;
+
+ desc_data.dma_addr = ring->dma_addrs[i];
+ desc_data.skb = ring->skbs[i];
+
+ unmap_func(priv, &desc_data);
+ dev_kfree_skb(desc_data.skb);
+ }
+}
+
+static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->rx_ring;
+
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx);
+}
+
+static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+
+ mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx);
+}
+
+/**
+ * mtk_star_handle_irq - Interrupt Handler.
+ * @irq: interrupt number.
+ * @data: pointer to a network interface device structure.
+ * Description: this is the driver interrupt service routine. It mainly
+ * handles:
+ * 1. tx complete interrupt for frame transmission.
+ * 2. rx complete interrupt for frame reception.
+ * 3. MAC Management Counter interrupt to avoid counter overflow.
+ */
+static irqreturn_t mtk_star_handle_irq(int irq, void *data)
+{
+ struct net_device *ndev = data;
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int intr_status = mtk_star_intr_ack_all(priv);
+ bool rx, tx;
+
+ rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) &&
+ napi_schedule_prep(&priv->rx_napi);
+ tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) &&
+ napi_schedule_prep(&priv->tx_napi);
+
+ if (rx || tx) {
+ spin_lock(&priv->lock);
+ /* mask Rx and TX Complete interrupt */
+ mtk_star_disable_dma_irq(priv, rx, tx);
+ spin_unlock(&priv->lock);
+
+ if (rx)
+ __napi_schedule(&priv->rx_napi);
+ if (tx)
+ __napi_schedule(&priv->tx_napi);
+ }
+
+ /* interrupt is triggered once any counters reach 0x8000000 */
+ if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) {
+ mtk_star_update_stats(priv);
+ mtk_star_reset_counters(priv);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Wait for the completion of any previous command - CMD_START bit must be
+ * cleared by hardware.
+ */
+static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout_atomic(priv->regs,
+ MTK_STAR_REG_HASH_CTRL, val,
+ !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START),
+ 10, MTK_STAR_WAIT_TIMEOUT);
+}
+
+static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+ int ret;
+
+ /* Wait for BIST_DONE bit. */
+ ret = regmap_read_poll_timeout_atomic(priv->regs,
+ MTK_STAR_REG_HASH_CTRL, val,
+ val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE,
+ 10, MTK_STAR_WAIT_TIMEOUT);
+ if (ret)
+ return ret;
+
+ /* Check the BIST_OK bit. */
+ if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
+ MTK_STAR_BIT_HASH_CTRL_BIST_OK))
+ return -EIO;
+
+ return 0;
+}
+
+static int mtk_star_set_hashbit(struct mtk_star_priv *priv,
+ unsigned int hash_addr)
+{
+ unsigned int val;
+ int ret;
+
+ ret = mtk_star_hash_wait_cmd_start(priv);
+ if (ret)
+ return ret;
+
+ val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR;
+ val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD;
+ val |= MTK_STAR_BIT_HASH_CTRL_CMD_START;
+ val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN;
+ val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA;
+ regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val);
+
+ return mtk_star_hash_wait_ok(priv);
+}
+
+static int mtk_star_reset_hash_table(struct mtk_star_priv *priv)
+{
+ int ret;
+
+ ret = mtk_star_hash_wait_cmd_start(priv);
+ if (ret)
+ return ret;
+
+ regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL,
+ MTK_STAR_BIT_HASH_CTRL_BIST_EN);
+ regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1,
+ MTK_STAR_BIT_TEST1_RST_HASH_MBIST);
+
+ return mtk_star_hash_wait_ok(priv);
+}
+
+static void mtk_star_phy_config(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ if (priv->speed == SPEED_1000)
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M;
+ else if (priv->speed == SPEED_100)
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M;
+ else
+ val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M;
+ val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD;
+
+ val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN;
+ if (priv->pause) {
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
+ } else {
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX;
+ val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX;
+ }
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val);
+
+ val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K;
+ val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH;
+ val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR;
+ regmap_update_bits(priv->regs, MTK_STAR_REG_FC_CFG,
+ MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH |
+ MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val);
+
+ val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K;
+ val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS;
+ regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG,
+ MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val);
+}
+
+static void mtk_star_adjust_link(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = priv->phydev;
+ bool new_state = false;
+
+ if (phydev->link) {
+ if (!priv->link) {
+ priv->link = phydev->link;
+ new_state = true;
+ }
+
+ if (priv->speed != phydev->speed) {
+ priv->speed = phydev->speed;
+ new_state = true;
+ }
+
+ if (priv->pause != phydev->pause) {
+ priv->pause = phydev->pause;
+ new_state = true;
+ }
+ } else {
+ if (priv->link) {
+ priv->link = phydev->link;
+ new_state = true;
+ }
+ }
+
+ if (new_state) {
+ if (phydev->link)
+ mtk_star_phy_config(priv);
+
+ phy_print_status(ndev->phydev);
+ }
+}
+
+static void mtk_star_init_config(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE |
+ MTK_STAR_BIT_EXT_MDC_MODE |
+ MTK_STAR_BIT_SWC_MII_MODE);
+
+ regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val);
+ regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF,
+ MTK_STAR_MSK_MAC_CLK_CONF,
+ priv->compat_data->bit_clk_div);
+}
+
+static int mtk_star_enable(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ unsigned int val;
+ int ret;
+
+ mtk_star_nic_disable_pd(priv);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_stop(priv);
+
+ mtk_star_set_mac_addr(ndev);
+
+ /* Configure the MAC */
+ val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT;
+ val <<= MTK_STAR_OFF_MAC_CFG_IPG;
+ val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522;
+ val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD;
+ val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP;
+ regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val);
+
+ /* Enable Hash Table BIST and reset it */
+ ret = mtk_star_reset_hash_table(priv);
+ if (ret)
+ return ret;
+
+ /* Setup the hashing algorithm */
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
+ MTK_STAR_BIT_ARL_CFG_HASH_ALG |
+ MTK_STAR_BIT_ARL_CFG_MISC_MODE);
+
+ /* Don't strip VLAN tags */
+ regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG,
+ MTK_STAR_BIT_MAC_CFG_VLAN_STRIP);
+
+ /* Setup DMA */
+ mtk_star_dma_init(priv);
+
+ ret = mtk_star_prepare_rx_skbs(ndev);
+ if (ret)
+ goto err_out;
+
+ /* Request the interrupt */
+ ret = request_irq(ndev->irq, mtk_star_handle_irq,
+ IRQF_TRIGGER_NONE, ndev->name, ndev);
+ if (ret)
+ goto err_free_skbs;
+
+ napi_enable(&priv->tx_napi);
+ napi_enable(&priv->rx_napi);
+
+ mtk_star_intr_ack_all(priv);
+ mtk_star_intr_enable(priv);
+
+ /* Connect to and start PHY */
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ mtk_star_adjust_link, 0, priv->phy_intf);
+ if (!priv->phydev) {
+ netdev_err(ndev, "failed to connect to PHY\n");
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+
+ mtk_star_dma_start(priv);
+ phy_start(priv->phydev);
+ netif_start_queue(ndev);
+
+ return 0;
+
+err_free_irq:
+ napi_disable(&priv->rx_napi);
+ napi_disable(&priv->tx_napi);
+ free_irq(ndev->irq, ndev);
+err_free_skbs:
+ mtk_star_free_rx_skbs(priv);
+err_out:
+ return ret;
+}
+
+static void mtk_star_disable(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ netif_stop_queue(ndev);
+ napi_disable(&priv->tx_napi);
+ napi_disable(&priv->rx_napi);
+ mtk_star_intr_disable(priv);
+ mtk_star_dma_disable(priv);
+ mtk_star_intr_ack_all(priv);
+ phy_stop(priv->phydev);
+ phy_disconnect(priv->phydev);
+ free_irq(ndev->irq, ndev);
+ mtk_star_free_rx_skbs(priv);
+ mtk_star_free_tx_skbs(priv);
+}
+
+static int mtk_star_netdev_open(struct net_device *ndev)
+{
+ return mtk_star_enable(ndev);
+}
+
+static int mtk_star_netdev_stop(struct net_device *ndev)
+{
+ mtk_star_disable(ndev);
+
+ return 0;
+}
+
+static int mtk_star_netdev_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ return phy_mii_ioctl(ndev->phydev, req, cmd);
+}
+
+static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ netif_stop_queue(priv->ndev);
+
+ /* Might race with mtk_star_tx_poll, check again */
+ smp_mb();
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size))
+ return -EBUSY;
+
+ netif_start_queue(priv->ndev);
+
+ return 0;
+}
+
+static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size)
+{
+ if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size))
+ return 0;
+
+ return __mtk_star_maybe_stop_tx(priv, size);
+}
+
+static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
+ int nfrags = skb_shinfo(skb)->nr_frags;
+
+ if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) {
+ if (!netif_queue_stopped(ndev)) {
+ netif_stop_queue(ndev);
+ /* This is a hard error, log it. */
+ pr_err_ratelimited("Tx ring full when queue awake\n");
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb);
+ if (dma_mapping_error(dev, desc_data.dma_addr))
+ goto err_drop_packet;
+
+ desc_data.skb = skb;
+ desc_data.len = skb->len;
+ mtk_star_ring_push_head_tx(ring, &desc_data);
+
+ netdev_sent_queue(ndev, skb->len);
+
+ mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED);
+
+ mtk_star_dma_resume_tx(priv);
+
+ return NETDEV_TX_OK;
+
+err_drop_packet:
+ dev_kfree_skb(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/* Returns the number of bytes sent, or a negative number if the first
+ * descriptor is still owned by the DMA engine.
+ */
+static int mtk_star_tx_complete_one(struct mtk_star_priv *priv)
+{
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct mtk_star_ring_desc_data desc_data;
+ int ret;
+
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return ret;
+
+ mtk_star_dma_unmap_tx(priv, &desc_data);
+ ret = desc_data.skb->len;
+ dev_kfree_skb_irq(desc_data.skb);
+
+ return ret;
+}
+
+static int mtk_star_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv,
+ tx_napi);
+ int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0;
+ struct mtk_star_ring *ring = &priv->tx_ring;
+ struct net_device *ndev = priv->ndev;
+ unsigned int head = ring->head;
+ unsigned int entry = ring->tail;
+
+ while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) {
+ ret = mtk_star_tx_complete_one(priv);
+ if (ret < 0)
+ break;
+
+ count++;
+ pkts_compl++;
+ bytes_compl += ret;
+ entry = ring->tail;
+ }
+
+ netdev_completed_queue(ndev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(ndev)) &&
+ (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH))
+ netif_wake_queue(ndev);
+
+ if (napi_complete(napi)) {
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, false, true);
+ spin_unlock(&priv->lock);
+ }
+
+ return 0;
+}
+
+static void mtk_star_netdev_get_stats64(struct net_device *ndev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+
+ mtk_star_update_stats(priv);
+
+ memcpy(stats, &priv->stats, sizeof(*stats));
+}
+
+static void mtk_star_set_rx_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct netdev_hw_addr *hw_addr;
+ unsigned int hash_addr, i;
+ int ret;
+
+ if (ndev->flags & IFF_PROMISC) {
+ regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG,
+ MTK_STAR_BIT_ARL_CFG_MISC_MODE);
+ } else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT ||
+ ndev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) {
+ ret = mtk_star_set_hashbit(priv, i);
+ if (ret)
+ goto hash_fail;
+ }
+ } else {
+ /* Clear previous settings. */
+ ret = mtk_star_reset_hash_table(priv);
+ if (ret)
+ goto hash_fail;
+
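+		/* The hash index computed below is 9 bits wide: the multicast
+		 * bit of the first address byte supplies bit 8 and the last
+		 * address byte supplies bits 7..0.
+		 */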
+ netdev_for_each_mc_addr(hw_addr, ndev) {
+ hash_addr = (hw_addr->addr[0] & 0x01) << 8;
+ hash_addr += hw_addr->addr[5];
+ ret = mtk_star_set_hashbit(priv, hash_addr);
+ if (ret)
+ goto hash_fail;
+ }
+ }
+
+ return;
+
+hash_fail:
+ if (ret == -ETIMEDOUT)
+ netdev_err(ndev, "setting hash bit timed out\n");
+ else
+ /* Should be -EIO */
+		netdev_err(ndev, "unable to set hash bit\n");
+}
+
+static const struct net_device_ops mtk_star_netdev_ops = {
+ .ndo_open = mtk_star_netdev_open,
+ .ndo_stop = mtk_star_netdev_stop,
+ .ndo_start_xmit = mtk_star_netdev_start_xmit,
+ .ndo_get_stats64 = mtk_star_netdev_get_stats64,
+ .ndo_set_rx_mode = mtk_star_set_rx_mode,
+ .ndo_eth_ioctl = mtk_star_netdev_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+};
+
+static void mtk_star_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver));
+}
+
+/* TODO Add ethtool stats. */
+static const struct ethtool_ops mtk_star_ethtool_ops = {
+ .get_drvinfo = mtk_star_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static int mtk_star_rx(struct mtk_star_priv *priv, int budget)
+{
+ struct mtk_star_ring *ring = &priv->rx_ring;
+ struct device *dev = mtk_star_get_dev(priv);
+ struct mtk_star_ring_desc_data desc_data;
+ struct net_device *ndev = priv->ndev;
+ struct sk_buff *curr_skb, *new_skb;
+ dma_addr_t new_dma_addr;
+ int ret, count = 0;
+
+ while (count < budget) {
+ ret = mtk_star_ring_pop_tail(ring, &desc_data);
+ if (ret)
+ return -1;
+
+ curr_skb = desc_data.skb;
+
+ if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) ||
+ (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) {
+ /* Error packet -> drop and reuse skb. */
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
+
+ /* Prepare new skb before receiving the current one.
+ * Reuse the current skb if we fail at any point.
+ */
+ new_skb = mtk_star_alloc_skb(ndev);
+ if (!new_skb) {
+ ndev->stats.rx_dropped++;
+ new_skb = curr_skb;
+ goto push_new_skb;
+ }
+
+ new_dma_addr = mtk_star_dma_map_rx(priv, new_skb);
+ if (dma_mapping_error(dev, new_dma_addr)) {
+ ndev->stats.rx_dropped++;
+ dev_kfree_skb(new_skb);
+ new_skb = curr_skb;
+ netdev_err(ndev, "DMA mapping error of RX descriptor\n");
+ goto push_new_skb;
+ }
+
+ /* We can't fail anymore at this point:
+ * it's safe to unmap the skb.
+ */
+ mtk_star_dma_unmap_rx(priv, &desc_data);
+
+ skb_put(desc_data.skb, desc_data.len);
+ desc_data.skb->ip_summed = CHECKSUM_NONE;
+ desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev);
+ desc_data.skb->dev = ndev;
+ netif_receive_skb(desc_data.skb);
+
+ /* update dma_addr for new skb */
+ desc_data.dma_addr = new_dma_addr;
+
+push_new_skb:
+
+ count++;
+
+ desc_data.len = skb_tailroom(new_skb);
+ desc_data.skb = new_skb;
+ mtk_star_ring_push_head_rx(ring, &desc_data);
+ }
+
+ mtk_star_dma_resume_rx(priv);
+
+ return count;
+}
+
+static int mtk_star_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct mtk_star_priv *priv;
+ int work_done = 0;
+
+ priv = container_of(napi, struct mtk_star_priv, rx_napi);
+
+ work_done = mtk_star_rx(priv, budget);
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ spin_lock(&priv->lock);
+ mtk_star_enable_dma_irq(priv, true, false);
+ spin_unlock(&priv->lock);
+ }
+
+ return work_done;
+}
+
+static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv)
+{
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0,
+ MTK_STAR_BIT_PHY_CTRL0_RWOK);
+}
+
+static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv)
+{
+ unsigned int val;
+
+ return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0,
+ val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
+ 10, MTK_STAR_WAIT_TIMEOUT);
+}
+
+static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum)
+{
+ struct mtk_star_priv *priv = mii->priv;
+ unsigned int val, data;
+ int ret;
+
+ mtk_star_mdio_rwok_clear(priv);
+
+ val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG);
+ val &= MTK_STAR_MSK_PHY_CTRL0_PREG;
+ val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
+
+ ret = mtk_star_mdio_rwok_wait(priv);
+ if (ret)
+ return ret;
+
+ regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data);
+
+ data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
+ data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
+
+ return data;
+}
+
+static int mtk_star_mdio_write(struct mii_bus *mii, int phy_id,
+ int regnum, u16 data)
+{
+ struct mtk_star_priv *priv = mii->priv;
+ unsigned int val;
+
+ mtk_star_mdio_rwok_clear(priv);
+
+ val = data;
+ val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA;
+ val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA;
+ regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG;
+ regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG;
+ val |= regnum;
+ val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD;
+
+ regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val);
+
+ return mtk_star_mdio_rwok_wait(priv);
+}
+
+static int mtk_star_mdio_init(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ struct device_node *of_node, *mdio_node;
+ int ret;
+
+ of_node = dev->of_node;
+
+ mdio_node = of_get_child_by_name(of_node, "mdio");
+ if (!mdio_node)
+ return -ENODEV;
+
+ if (!of_device_is_available(mdio_node)) {
+ ret = -ENODEV;
+ goto out_put_node;
+ }
+
+ priv->mii = devm_mdiobus_alloc(dev);
+ if (!priv->mii) {
+ ret = -ENOMEM;
+ goto out_put_node;
+ }
+
+ snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev));
+ priv->mii->name = "mtk-mac-mdio";
+ priv->mii->parent = dev;
+ priv->mii->read = mtk_star_mdio_read;
+ priv->mii->write = mtk_star_mdio_write;
+ priv->mii->priv = priv;
+
+ ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node);
+
+out_put_node:
+ of_node_put(mdio_node);
+ return ret;
+}
+
+static __maybe_unused int mtk_star_suspend(struct device *dev)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+
+ ndev = dev_get_drvdata(dev);
+ priv = netdev_priv(ndev);
+
+ if (netif_running(ndev))
+ mtk_star_disable(ndev);
+
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+
+ return 0;
+}
+
+static __maybe_unused int mtk_star_resume(struct device *dev)
+{
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ int ret;
+
+ ndev = dev_get_drvdata(dev);
+ priv = netdev_priv(ndev);
+
+ ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ if (netif_running(ndev)) {
+ ret = mtk_star_enable(ndev);
+ if (ret)
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+ }
+
+ return ret;
+}
+
+static void mtk_star_clk_disable_unprepare(void *data)
+{
+ struct mtk_star_priv *priv = data;
+
+ clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks);
+}
+
+static int mtk_star_set_timing(struct mtk_star_priv *priv)
+{
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int delay_val = 0;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv);
+ delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv);
+ break;
+ default:
+		dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val);
+}
+
+static int mtk_star_probe(struct platform_device *pdev)
+{
+ struct device_node *of_node;
+ struct mtk_star_priv *priv;
+ struct net_device *ndev;
+ struct device *dev;
+ void __iomem *base;
+ int ret, i;
+
+ dev = &pdev->dev;
+ of_node = dev->of_node;
+
+ ndev = devm_alloc_etherdev(dev, sizeof(*priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ priv = netdev_priv(ndev);
+ priv->ndev = ndev;
+ priv->compat_data = of_device_get_match_data(&pdev->dev);
+ SET_NETDEV_DEV(ndev, dev);
+ platform_set_drvdata(pdev, ndev);
+
+ ndev->min_mtu = ETH_ZLEN;
+ ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE;
+
+ spin_lock_init(&priv->lock);
+
+ base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ /* We won't be checking the return values of regmap read & write
+	 * functions. They can only fail for mmio if there's a clock attached
+	 * to the regmap, which is not the case here.
+ */
+ priv->regs = devm_regmap_init_mmio(dev, base,
+ &mtk_star_regmap_config);
+ if (IS_ERR(priv->regs))
+ return PTR_ERR(priv->regs);
+
+ priv->pericfg = syscon_regmap_lookup_by_phandle(of_node,
+ "mediatek,pericfg");
+ if (IS_ERR(priv->pericfg)) {
+ dev_err(dev, "Failed to lookup the PERICFG syscon\n");
+ return PTR_ERR(priv->pericfg);
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0)
+ return ndev->irq;
+
+ for (i = 0; i < MTK_STAR_NCLKS; i++)
+ priv->clks[i].id = mtk_star_clk_names[i];
+ ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks);
+ if (ret)
+ return ret;
+
+ ret = devm_add_action_or_reset(dev,
+ mtk_star_clk_disable_unprepare, priv);
+ if (ret)
+ return ret;
+
+ ret = of_get_phy_mode(of_node, &priv->phy_intf);
+ if (ret) {
+ return ret;
+ } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII &&
+ priv->phy_intf != PHY_INTERFACE_MODE_MII) {
+ dev_err(dev, "unsupported phy mode: %s\n",
+ phy_modes(priv->phy_intf));
+ return -EINVAL;
+ }
+
+ priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0);
+ if (!priv->phy_node) {
+ dev_err(dev, "failed to retrieve the phy handle from device tree\n");
+ return -ENODEV;
+ }
+
+ priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc");
+ priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse");
+ priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse");
+
+ if (priv->compat_data->set_interface_mode) {
+ ret = priv->compat_data->set_interface_mode(ndev);
+ if (ret) {
+ dev_err(dev, "Failed to set phy interface, err = %d\n", ret);
+ return -EINVAL;
+ }
+ }
+
+ ret = mtk_star_set_timing(priv);
+ if (ret) {
+ dev_err(dev, "Failed to set timing, err = %d\n", ret);
+ return -EINVAL;
+ }
+
+ ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(dev, "unsupported DMA mask\n");
+ return ret;
+ }
+
+ priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE,
+ &priv->dma_addr,
+ GFP_KERNEL | GFP_DMA);
+ if (!priv->ring_base)
+ return -ENOMEM;
+
+ mtk_star_nic_disable_pd(priv);
+ mtk_star_init_config(priv);
+
+ ret = mtk_star_mdio_init(ndev);
+ if (ret)
+ return ret;
+
+ ret = platform_get_ethdev_address(dev, ndev);
+ if (ret || !is_valid_ether_addr(ndev->dev_addr))
+ eth_hw_addr_random(ndev);
+
+ ndev->netdev_ops = &mtk_star_netdev_ops;
+ ndev->ethtool_ops = &mtk_star_ethtool_ops;
+
+ netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll);
+ netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll);
+
+ return devm_register_netdev(dev, ndev);
+}
+
+#ifdef CONFIG_OF
+static int mt8516_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+	unsigned int intf_val, rmii_rxc;
+	int ret;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ rmii_rxc = 0;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ rmii_rxc = priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK;
+ break;
+ default:
+		dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ ret = regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG1_CON,
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK,
+ rmii_rxc);
+ if (ret)
+ return ret;
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG0_CON,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF,
+ intf_val);
+}
+
+static int mt8365_set_interface_mode(struct net_device *ndev)
+{
+ struct mtk_star_priv *priv = netdev_priv(ndev);
+ struct device *dev = mtk_star_get_dev(priv);
+ unsigned int intf_val;
+
+ switch (priv->phy_intf) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII;
+ intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2;
+ break;
+ default:
+		dev_err(dev, "This interface is not supported\n");
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(priv->pericfg,
+ MTK_PERICFG_REG_NIC_CFG_CON_V2,
+ MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF |
+ MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2,
+ intf_val);
+}
+
+static const struct mtk_star_compat mtk_star_mt8516_compat = {
+ .set_interface_mode = mt8516_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10,
+};
+
+static const struct mtk_star_compat mtk_star_mt8365_compat = {
+ .set_interface_mode = mt8365_set_interface_mode,
+ .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50,
+};
+
+static const struct of_device_id mtk_star_of_match[] = {
+ { .compatible = "mediatek,mt8516-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8518-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8175-eth",
+ .data = &mtk_star_mt8516_compat },
+ { .compatible = "mediatek,mt8365-eth",
+ .data = &mtk_star_mt8365_compat },
+ { }
+};
+MODULE_DEVICE_TABLE(of, mtk_star_of_match);
+#endif
+
+static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops,
+ mtk_star_suspend, mtk_star_resume);
+
+static struct platform_driver mtk_star_driver = {
+ .driver = {
+ .name = MTK_STAR_DRVNAME,
+ .pm = &mtk_star_pm_ops,
+ .of_match_table = of_match_ptr(mtk_star_of_match),
+ },
+ .probe = mtk_star_probe,
+};
+module_platform_driver(mtk_star_driver);
+
+MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>");
+MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c
new file mode 100644
index 0000000000..94376aa2b3
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.c
@@ -0,0 +1,1974 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/bitfield.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mfd/syscon.h>
+#include <linux/debugfs.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <net/flow_offload.h>
+#include <net/pkt_cls.h>
+#include "mtk_eth_soc.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed.h"
+#include "mtk_ppe.h"
+#include "mtk_wed_wo.h"
+
+#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000)
+
+#define MTK_WED_PKT_SIZE 1900
+#define MTK_WED_BUF_SIZE 2048
+#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048)
+#define MTK_WED_RX_RING_SIZE 1536
+
+#define MTK_WED_TX_RING_SIZE 2048
+#define MTK_WED_WDMA_RING_SIZE 1024
+#define MTK_WED_MAX_GROUP_SIZE 0x100
+#define MTK_WED_VLD_GROUP_SIZE 0x40
+#define MTK_WED_PER_GROUP_PKT 128
+
+#define MTK_WED_FBUF_SIZE 128
+#define MTK_WED_MIOD_CNT 16
+#define MTK_WED_FB_CMD_CNT 1024
+#define MTK_WED_RRO_QUE_CNT 8192
+#define MTK_WED_MIOD_ENTRY_CNT 128
+
+static struct mtk_wed_hw *hw_list[2];
+static DEFINE_MUTEX(hw_lock);
+
+struct mtk_wed_flow_block_priv {
+ struct mtk_wed_hw *hw;
+ struct net_device *dev;
+};
+
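+/* Read-modify-write helper: wed_set() passes the bits to set in @val and
+ * wed_clr() the bits to clear in @mask, so the update mask handed to the
+ * regmap below has to cover both arguments.
+ */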
+static void
+wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ regmap_update_bits(dev->hw->regs, reg, mask | val, val);
+}
+
+static void
+wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, 0, mask);
+}
+
+static void
+wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return wed_m32(dev, reg, mask, 0);
+}
+
+static void
+wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val)
+{
+ wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val);
+}
+
+static void
+wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, 0, mask);
+}
+
+static void
+wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ wdma_m32(dev, reg, mask, 0);
+}
+
+static u32
+wifi_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->wlan.base + reg);
+}
+
+static void
+wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->wlan.base + reg);
+}
+
+static u32
+mtk_wed_read_reset(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_RESET);
+}
+
+static u32
+mtk_wdma_read_reset(struct mtk_wed_device *dev)
+{
+ return wdma_r32(dev, MTK_WDMA_GLO_CFG);
+}
+
+static int
+mtk_wdma_rx_reset(struct mtk_wed_device *dev)
+{
+ u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY;
+ int i, ret;
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN);
+ ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000);
+ if (ret)
+ dev_err(dev->hw->dev, "rx reset failed\n");
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) {
+ if (dev->rx_wdma[i].desc)
+ continue;
+
+ wdma_w32(dev,
+ MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
+ }
+
+ return ret;
+}
+
+static void
+mtk_wdma_tx_reset(struct mtk_wed_device *dev)
+{
+ u32 status, mask = MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
+ int i;
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
+ if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
+ !(status & mask), 0, 10000))
+ dev_err(dev->hw->dev, "tx reset failed\n");
+
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
+ wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
+ wdma_w32(dev,
+ MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
+}
+
+static void
+mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 status;
+
+ wed_w32(dev, MTK_WED_RESET, mask);
+ if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
+ !(status & mask), 0, 1000))
+ WARN_ON_ONCE(1);
+}
+
+static u32
+mtk_wed_wo_read_status(struct mtk_wed_device *dev)
+{
+ return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
+}
+
+static void
+mtk_wed_wo_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 state = MTK_WED_WO_STATE_DISABLE;
+ void __iomem *reg;
+ u32 val;
+
+ mtk_wdma_tx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+
+ if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &state,
+ sizeof(state), false))
+ return;
+
+ if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
+ val == MTK_WED_WOIF_DISABLE_DONE,
+ 100, MTK_WOCPU_TIMEOUT))
+ dev_err(dev->hw->dev, "failed to disable wed-wo\n");
+
+ reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);
+
+ val = readl(reg);
+ switch (dev->hw->index) {
+ case 0:
+ val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ break;
+ case 1:
+ val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
+ writel(val, reg);
+ break;
+ default:
+ break;
+ }
+ iounmap(reg);
+}
+
+void mtk_wed_fe_reset(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev;
+ int err;
+
+ if (!hw)
+ break;
+
+ dev = hw->wed_dev;
+ if (!dev || !dev->wlan.reset)
+ continue;
+
+ /* reset callback blocks until WLAN reset is completed */
+ err = dev->wlan.reset(dev);
+ if (err)
+ dev_err(dev->dev, "wlan reset failed: %d\n", err);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+void mtk_wed_fe_reset_complete(void)
+{
+ int i;
+
+ mutex_lock(&hw_lock);
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw = hw_list[i];
+ struct mtk_wed_device *dev;
+
+ if (!hw)
+ break;
+
+ dev = hw->wed_dev;
+ if (!dev || !dev->wlan.reset_complete)
+ continue;
+
+ dev->wlan.reset_complete(dev);
+ }
+
+ mutex_unlock(&hw_lock);
+}
+
+static struct mtk_wed_hw *
+mtk_wed_assign(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw;
+ int i;
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
+ if (!hw)
+ return NULL;
+
+ if (!hw->wed_dev)
+ goto out;
+
+ if (hw->version == 1)
+ return NULL;
+
+ /* MT7986 WED devices do not have any pcie slot restrictions */
+ }
+ /* MT7986 PCIE or AXI */
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ hw = hw_list[i];
+ if (hw && !hw->wed_dev)
+ goto out;
+ }
+
+ return NULL;
+
+out:
+ hw->wed_dev = dev;
+ return hw;
+}
+
+static int
+mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc;
+ dma_addr_t desc_phys;
+ void **page_list;
+ int token = dev->wlan.token_start;
+ int ring_size;
+ int n_pages;
+ int i, page_idx;
+
+ ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
+ n_pages = ring_size / MTK_WED_BUF_PER_PAGE;
+
+ page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
+ if (!page_list)
+ return -ENOMEM;
+
+ dev->tx_buf_ring.size = ring_size;
+ dev->tx_buf_ring.pages = page_list;
+
+ desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->tx_buf_ring.desc = desc;
+ dev->tx_buf_ring.desc_phys = desc_phys;
+
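+	/* Each page below is carved into MTK_WED_BUF_PER_PAGE buffers of
+	 * MTK_WED_BUF_SIZE bytes; buf0 of each descriptor points at the TXD
+	 * area the WLAN driver fills in through its init_buf() callback and
+	 * buf1 at the payload space directly behind it.
+	 */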
+ for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
+ dma_addr_t page_phys, buf_phys;
+ struct page *page;
+ void *buf;
+ int s;
+
+ page = __dev_alloc_pages(GFP_KERNEL, 0);
+ if (!page)
+ return -ENOMEM;
+
+ page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ if (dma_mapping_error(dev->hw->dev, page_phys)) {
+ __free_page(page);
+ return -ENOMEM;
+ }
+
+ page_list[page_idx++] = page;
+ dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+
+ buf = page_to_virt(page);
+ buf_phys = page_phys;
+
+ for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
+ u32 txd_size;
+ u32 ctrl;
+
+ txd_size = dev->wlan.init_buf(buf, buf_phys, token++);
+
+ desc->buf0 = cpu_to_le32(buf_phys);
+ desc->buf1 = cpu_to_le32(buf_phys + txd_size);
+
+ if (dev->hw->version == 1)
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG1;
+ else
+ ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) |
+ FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
+ MTK_WED_BUF_SIZE - txd_size) |
+ MTK_WDMA_DESC_CTRL_LAST_SEG0;
+ desc->ctrl = cpu_to_le32(ctrl);
+ desc->info = 0;
+ desc++;
+
+ buf += MTK_WED_BUF_SIZE;
+ buf_phys += MTK_WED_BUF_SIZE;
+ }
+
+ dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_wdma_desc *desc = dev->tx_buf_ring.desc;
+ void **page_list = dev->tx_buf_ring.pages;
+ int page_idx;
+ int i;
+
+ if (!page_list)
+ return;
+
+ if (!desc)
+ goto free_pagelist;
+
+ for (i = 0, page_idx = 0; i < dev->tx_buf_ring.size;
+ i += MTK_WED_BUF_PER_PAGE) {
+ void *page = page_list[page_idx++];
+ dma_addr_t buf_addr;
+
+ if (!page)
+ break;
+
+ buf_addr = le32_to_cpu(desc[i].buf0);
+ dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
+ DMA_BIDIRECTIONAL);
+ __free_page(page);
+ }
+
+ dma_free_coherent(dev->hw->dev, dev->tx_buf_ring.size * sizeof(*desc),
+ desc, dev->tx_buf_ring.desc_phys);
+
+free_pagelist:
+ kfree(page_list);
+}
+
+static int
+mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc;
+ dma_addr_t desc_phys;
+
+ dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
+ desc = dma_alloc_coherent(dev->hw->dev,
+ dev->wlan.rx_nbuf * sizeof(*desc),
+ &desc_phys, GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ dev->rx_buf_ring.desc = desc;
+ dev->rx_buf_ring.desc_phys = desc_phys;
+ dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);
+
+ return 0;
+}
+
+static void
+mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
+{
+ struct mtk_rxbm_desc *desc = dev->rx_buf_ring.desc;
+
+ if (!desc)
+ return;
+
+ dev->wlan.release_rx_buf(dev);
+ dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
+ desc, dev->rx_buf_ring.desc_phys);
+}
+
+static void
+mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
+ FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
+ wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
+ wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
+ FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
+ wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
+ FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+}
+
+static void
+mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
+{
+ if (!ring->desc)
+ return;
+
+ dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
+ ring->desc, ring->desc_phys);
+}
+
+static void
+mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
+{
+ mtk_wed_free_rx_buffer(dev);
+ mtk_wed_free_ring(dev, &dev->rro.ring);
+}
+
+static void
+mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
+ mtk_wed_free_ring(dev, &dev->tx_ring[i]);
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
+}
+
+static void
+mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
+{
+ u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ if (!dev->hw->num_flows)
+ mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? mask : 0);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK);
+}
+
+static void
+mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
+{
+ if (enable) {
+ wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
+ } else {
+ wed_w32(dev, MTK_WED_TXP_DW1,
+ FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
+ wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
+ }
+}
+
+#define MTK_WFMDA_RX_DMA_EN BIT(2)
+static void
+mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev, int idx)
+{
+ u32 val;
+ int i;
+
+ if (!(dev->rx_ring[idx].flags & MTK_WED_RING_CONFIGURED))
+ return; /* queue is not configured by mt76 */
+
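+	/* Poll (up to three 100-200 ms sleeps) for the WPDMA RX data ring
+	 * CPU index to reach the end of the ring before enabling the WiFi
+	 * RX DMA below.
+	 */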
+ for (i = 0; i < 3; i++) {
+ u32 cur_idx;
+
+ cur_idx = wed_r32(dev,
+ MTK_WED_WPDMA_RING_RX_DATA(idx) +
+ MTK_WED_RING_OFS_CPU_IDX);
+ if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
+ break;
+
+ usleep_range(100000, 200000);
+ }
+
+ if (i == 3) {
+ dev_err(dev->hw->dev, "rx dma enable failed\n");
+ return;
+ }
+
+ val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) |
+ MTK_WFMDA_RX_DMA_EN;
+ wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val);
+}
+
+static void
+mtk_wed_dma_disable(struct mtk_wed_device *dev)
+{
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wed_clr(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
+ wdma_clr(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+ }
+
+ mtk_wed_set_512_support(dev, false);
+}
+
+static void
+mtk_wed_stop(struct mtk_wed_device *dev)
+{
+ mtk_wed_set_ext_int(dev, false);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
+ wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0);
+
+ if (dev->hw->version == 1)
+ return;
+
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
+ wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
+}
+
+static void
+mtk_wed_deinit(struct mtk_wed_device *dev)
+{
+ mtk_wed_stop(dev);
+ mtk_wed_dma_disable(dev);
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ if (dev->hw->version == 1)
+ return;
+
+ wed_clr(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_EN |
+ MTK_WED_CTRL_WED_RX_BM_EN |
+ MTK_WED_CTRL_RX_RRO_QM_EN);
+}
+
+static void
+__mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_hw *hw = dev->hw;
+
+ mtk_wed_deinit(dev);
+
+ mtk_wdma_rx_reset(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_free_tx_buffer(dev);
+ mtk_wed_free_tx_rings(dev);
+
+ if (mtk_wed_get_rx_capa(dev)) {
+ if (hw->wed_wo)
+ mtk_wed_wo_reset(dev);
+ mtk_wed_free_rx_rings(dev);
+ if (hw->wed_wo)
+ mtk_wed_wo_deinit(hw);
+ }
+
+ if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
+ struct device_node *wlan_node;
+
+ wlan_node = dev->wlan.pci_dev->dev.of_node;
+ if (of_dma_is_coherent(wlan_node) && hw->hifsys)
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), BIT(hw->index));
+ }
+
+ if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
+ hw->eth->dma_dev != hw->eth->dev)
+ mtk_eth_set_dma_device(hw->eth, hw->eth->dev);
+
+ memset(dev, 0, sizeof(*dev));
+ module_put(THIS_MODULE);
+
+ hw->wed_dev = NULL;
+}
+
+static void
+mtk_wed_detach(struct mtk_wed_device *dev)
+{
+ mutex_lock(&hw_lock);
+ __mtk_wed_detach(dev);
+ mutex_unlock(&hw_lock);
+}
+
+#define PCIE_BASE_ADDR0 0x11280000
+static void
+mtk_wed_bus_init(struct mtk_wed_device *dev)
+{
+ switch (dev->wlan.bus_type) {
+ case MTK_WED_BUS_PCIE: {
+ struct device_node *np = dev->hw->eth->dev->of_node;
+ struct regmap *regs;
+
+ regs = syscon_regmap_lookup_by_phandle(np,
+ "mediatek,wed-pcie");
+ if (IS_ERR(regs))
+ break;
+
+ regmap_update_bits(regs, 0, BIT(0), BIT(0));
+
+ wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));
+
+ /* pcie interrupt control: pola/source selection */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
+ FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1));
+ wed_r32(dev, MTK_WED_PCIE_INT_CTRL);
+
+ wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184);
+
+ /* pcie interrupt status trigger register */
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
+ wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER);
+
+ /* pola setting */
+ wed_set(dev, MTK_WED_PCIE_INT_CTRL,
+ MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA);
+ break;
+ }
+ case MTK_WED_BUS_AXI:
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
+ MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
+ break;
+ default:
+ break;
+ }
+}
+
+static void
+mtk_wed_set_wpdma(struct mtk_wed_device *dev)
+{
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
+ } else {
+ mtk_wed_bus_init(dev);
+
+ wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
+ wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);
+ wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
+ wed_w32(dev, MTK_WED_WPDMA_RX_RING, dev->wlan.wpdma_rx);
+ }
+}
+
+static void
+mtk_wed_hw_init_early(struct mtk_wed_device *dev)
+{
+ u32 mask, set;
+
+ mtk_wed_deinit(dev);
+ mtk_wed_reset(dev, MTK_WED_RESET_WED);
+ mtk_wed_set_wpdma(dev);
+
+ mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
+ MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
+ set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) |
+ MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
+ MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
+ wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);
+
+ if (dev->hw->version == 1) {
+ u32 offset = dev->hw->index ? 0x04000400 : 0;
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
+ wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
+ MTK_PCIE_BASE(dev->hw->index));
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
+ wed_w32(dev, MTK_WED_WDMA_OFFSET0,
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
+ MTK_WDMA_INT_STATUS) |
+ FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
+ MTK_WDMA_GLO_CFG));
+
+ wed_w32(dev, MTK_WED_WDMA_OFFSET1,
+ FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
+ MTK_WDMA_RING_TX(0)) |
+ FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
+ MTK_WDMA_RING_RX(0)));
+ }
+}
+
+static int
+mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev,
+ size * sizeof(*ring->desc),
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_size = sizeof(*ring->desc);
+ ring->size = size;
+
+ return 0;
+}
+
+#define MTK_WED_MIOD_COUNT (MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)
+static int
+mtk_wed_rro_alloc(struct mtk_wed_device *dev)
+{
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ int index;
+
+ index = of_property_match_string(dev->hw->node, "memory-region-names",
+ "wo-dlm");
+ if (index < 0)
+ return index;
+
+ np = of_parse_phandle(dev->hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ dev->rro.miod_phys = rmem->base;
+ dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;
+
+ return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
+ MTK_WED_RRO_QUE_CNT);
+}
+
+static int
+mtk_wed_rro_cfg(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ struct {
+ struct {
+ __le32 base;
+ __le32 cnt;
+ __le32 unit;
+ } ring[2];
+ __le32 wed;
+ u8 version;
+ } req = {
+ .ring[0] = {
+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
+ .cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
+ .unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
+ },
+ .ring[1] = {
+ .base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
+ MTK_WED_MIOD_COUNT),
+ .cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
+ .unit = cpu_to_le32(4),
+ },
+ };
+
+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_WED_CFG,
+ &req, sizeof(req), true);
+}
+
+static void
+mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
+ FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
+ FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
+ FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
+ MTK_WED_MIOD_ENTRY_CNT >> 2));
+
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
+ FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
+ FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
+ wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
+ wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);
+
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+}
+
+static void
+mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
+{
+ wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);
+
+ for (;;) {
+ usleep_range(100, 200);
+ if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
+ break;
+ }
+
+ /* configure RX_ROUTE_QM */
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT, 0x3 + dev->hw->index));
+ wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
+ /* enable RX_ROUTE_QM */
+ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+}
+
+static void
+mtk_wed_hw_init(struct mtk_wed_device *dev)
+{
+ if (dev->init_done)
+ return;
+
+ dev->init_done = true;
+ mtk_wed_set_ext_int(dev, false);
+ wed_w32(dev, MTK_WED_TX_BM_CTRL,
+ MTK_WED_TX_BM_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
+ MTK_WED_TX_RING_SIZE / 256));
+
+ wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
+
+ wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);
+
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_TX_BM_TKID,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
+ MTK_WED_TX_BM_DYN_THR_HI);
+ } else {
+ wed_w32(dev, MTK_WED_TX_BM_TKID_V2,
+ FIELD_PREP(MTK_WED_TX_BM_TKID_START,
+ dev->wlan.token_start) |
+ FIELD_PREP(MTK_WED_TX_BM_TKID_END,
+ dev->wlan.token_start +
+ dev->wlan.nbuf - 1));
+ wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
+ MTK_WED_TX_BM_DYN_THR_HI_V2);
+ wed_w32(dev, MTK_WED_TX_TKID_CTRL,
+ MTK_WED_TX_TKID_CTRL_PAUSE |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
+ dev->tx_buf_ring.size / 128) |
+ FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
+ dev->tx_buf_ring.size / 128));
+ wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
+ FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
+ MTK_WED_TX_TKID_DYN_THR_HI);
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ if (dev->hw->version == 1) {
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+ } else {
+ wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE);
+ /* rx hw init */
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+
+ mtk_wed_rx_buffer_hw_init(dev);
+ mtk_wed_rro_hw_init(dev);
+ mtk_wed_route_qm_hw_init(dev);
+ }
+
+ wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
+}
+
+static void
+mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
+{
+ void *head = (void *)ring->desc;
+ int i;
+
+ for (i = 0; i < size; i++) {
+ struct mtk_wdma_desc *desc;
+
+ desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
+ desc->buf0 = 0;
+ if (tx)
+ desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
+ else
+ desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
+ desc->buf1 = 0;
+ desc->info = 0;
+ }
+}
+
+static u32
+mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+ return !!(wed_r32(dev, reg) & mask);
+}
+
+static int
+mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask)
+{
+	int sleep = 15000; /* poll interval: 15 ms */
+	int timeout = 100 * sleep; /* give up after roughly 1.5 s */
+ u32 val;
+
+ return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep,
+ timeout, false, dev, reg, mask);
+}
+
+static int
+mtk_wed_rx_reset(struct mtk_wed_device *dev)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+ u8 val = MTK_WED_WO_STATE_SER_RESET;
+ int i, ret;
+
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
+ MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
+ MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+ wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
+ MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
+ }
+
+ /* reset rro qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_RRO_QM_BUSY);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
+ } else {
+ wed_set(dev, MTK_WED_RROQM_RST_IDX,
+ MTK_WED_RROQM_RST_IDX_MIOD |
+ MTK_WED_RROQM_RST_IDX_FDBK);
+ wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
+ }
+
+ /* reset route qm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
+ ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
+ if (ret)
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
+ else
+ wed_set(dev, MTK_WED_RTQM_GLO_CFG,
+ MTK_WED_RTQM_Q_RST);
+
+ /* reset tx wdma */
+ mtk_wdma_tx_reset(dev);
+
+ /* reset tx wdma drv */
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);
+
+ /* reset wed rx dma */
+ ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_RX_DMA_BUSY);
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
+ if (ret) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
+ } else {
+ struct mtk_eth *eth = dev->hw->eth;
+
+ if (mtk_is_netsys_v2_or_greater(eth))
+ wed_set(dev, MTK_WED_RESET_IDX,
+ MTK_WED_RESET_IDX_RX_V2);
+ else
+ wed_set(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* reset rx bm */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
+ mtk_wed_poll_busy(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WED_RX_BM_BUSY);
+ mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);
+
+ /* wo change to enable state */
+ val = MTK_WED_WO_STATE_ENABLE;
+ ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
+ MTK_WED_WO_CMD_CHANGE_STATE, &val,
+ sizeof(val), true);
+ if (ret)
+ return ret;
+
+ /* wed_rx_ring_reset */
+ for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
+ if (!dev->rx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
+ false);
+ }
+ mtk_wed_free_rx_buffer(dev);
+
+ return 0;
+}
+
+static void
+mtk_wed_reset_dma(struct mtk_wed_device *dev)
+{
+ bool busy = false;
+ u32 val;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
+ if (!dev->tx_ring[i].desc)
+ continue;
+
+ mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
+ true);
+ }
+
+ /* 1. reset WED tx DMA */
+ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN);
+ busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_BUSY);
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA);
+ } else {
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_IDX_TX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ /* 2. reset WDMA rx DMA */
+ busy = !!mtk_wdma_rx_reset(dev);
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX,
+ MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV);
+ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+
+ wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE);
+ }
+
+	/* 3. reset WED TX free agent and TX buffer manager */
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ for (i = 0; i < 100; i++) {
+ val = wed_r32(dev, MTK_WED_TX_BM_INTF);
+ if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40)
+ break;
+ }
+
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT);
+ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN);
+ mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);
+
+ /* 4. reset WED WPDMA tx */
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY);
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ if (!busy)
+ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY);
+
+ if (busy) {
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV);
+ mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV);
+ } else {
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX,
+ MTK_WED_WPDMA_RESET_IDX_TX |
+ MTK_WED_WPDMA_RESET_IDX_RX);
+ wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0);
+ }
+
+ dev->init_done = false;
+ if (dev->hw->version == 1)
+ return;
+
+ if (!busy) {
+ wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX);
+ wed_w32(dev, MTK_WED_RESET_IDX, 0);
+ }
+
+ mtk_wed_rx_reset(dev);
+}
+
+static int
+mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
+ int size, u32 desc_size, bool tx)
+{
+ ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size,
+ &ring->desc_phys, GFP_KERNEL);
+ if (!ring->desc)
+ return -ENOMEM;
+
+ ring->desc_size = desc_size;
+ ring->size = size;
+ mtk_wed_ring_reset(ring, size, tx);
+
+ return 0;
+}
+
+static int
+mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->rx_wdma))
+ return -EINVAL;
+
+ wdma = &dev->rx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+
+ return 0;
+}
+
+static int
+mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size,
+ bool reset)
+{
+ u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version;
+ struct mtk_wed_ring *wdma;
+
+ if (idx >= ARRAY_SIZE(dev->tx_wdma))
+ return -EINVAL;
+
+ wdma = &dev->tx_wdma[idx];
+ if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE,
+ desc_size, true))
+ return -ENOMEM;
+
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ size);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+ wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0);
+
+ if (reset)
+ mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true);
+
+ if (!idx) {
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE,
+ wdma->desc_phys);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT,
+ size);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX,
+ 0);
+ wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX,
+ 0);
+ }
+
+ return 0;
+}
+
+static void
+mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb,
+ u32 reason, u32 hash)
+{
+ struct mtk_eth *eth = dev->hw->eth;
+ struct ethhdr *eh;
+
+ if (!skb)
+ return;
+
+ if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
+ return;
+
+ skb_set_mac_header(skb, 0);
+ eh = eth_hdr(skb);
+ skb->protocol = eh->h_proto;
+ mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash);
+}
+
+static void
+mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0));
+
+ /* wed control cr set */
+ wed_set(dev, MTK_WED_CTRL,
+ MTK_WED_CTRL_WDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
+ MTK_WED_CTRL_WED_TX_BM_EN |
+ MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
+
+ if (dev->hw->version == 1) {
+ wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER,
+ MTK_WED_PCIE_INT_TRIGGER_STATUS);
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER,
+ MTK_WED_WPDMA_INT_TRIGGER_RX_DONE |
+ MTK_WED_WPDMA_INT_TRIGGER_TX_DONE);
+
+ wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask);
+ } else {
+ wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE,
+ GENMASK(1, 0));
+		/* initial tx interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX,
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG,
+ dev->wlan.tx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG,
+ dev->wlan.tx_tbit[1]));
+
+		/* initial txfree interrupt trigger */
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE,
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN |
+ MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG,
+ dev->wlan.txfree_tbit));
+
+ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX,
+ MTK_WED_WPDMA_INT_CTRL_RX0_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX0_CLR |
+ MTK_WED_WPDMA_INT_CTRL_RX1_EN |
+ MTK_WED_WPDMA_INT_CTRL_RX1_CLR |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG,
+ dev->wlan.rx_tbit[0]) |
+ FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG,
+ dev->wlan.rx_tbit[1]));
+
+ wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask);
+ wed_set(dev, MTK_WED_WDMA_INT_CTRL,
+ FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL,
+ dev->wdma_idx));
+ }
+
+ wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask);
+
+ wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask);
+ wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask);
+ wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask);
+ wed_w32(dev, MTK_WED_INT_MASK, irq_mask);
+}
+
+static void
+mtk_wed_dma_enable(struct mtk_wed_device *dev)
+{
+ wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV);
+
+ wed_set(dev, MTK_WED_GLO_CFG,
+ MTK_WED_GLO_CFG_TX_DMA_EN |
+ MTK_WED_GLO_CFG_RX_DMA_EN);
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);
+
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_TX_DMA_EN |
+ MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
+ MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);
+
+ if (dev->hw->version == 1) {
+ wdma_set(dev, MTK_WDMA_GLO_CFG,
+ MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
+ } else {
+ int i;
+
+ wed_set(dev, MTK_WED_WPDMA_CTRL,
+ MTK_WED_WPDMA_CTRL_SDL1_FIXED);
+
+ wed_set(dev, MTK_WED_WDMA_GLO_CFG,
+ MTK_WED_WDMA_GLO_CFG_TX_DRV_EN |
+ MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);
+
+ wed_set(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
+ MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);
+
+ wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
+ MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP |
+ MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV);
+
+ wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
+ MTK_WED_WPDMA_RX_D_RX_DRV_EN |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) |
+ FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL,
+ 0x2));
+
+ for (i = 0; i < MTK_WED_RX_QUEUES; i++)
+ mtk_wed_check_wfdma_rx_fill(dev, i);
+ }
+}
+
+static void
+mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask)
+{
+ int i;
+
+ if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
+ if (!dev->rx_wdma[i].desc)
+ mtk_wed_wdma_rx_ring_setup(dev, i, 16, false);
+
+ mtk_wed_hw_init(dev);
+ mtk_wed_configure_irq(dev, irq_mask);
+
+ mtk_wed_set_ext_int(dev, true);
+
+ if (dev->hw->version == 1) {
+ u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN |
+ FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID,
+ dev->hw->index);
+
+ val |= BIT(0) | (BIT(1) * !!dev->hw->index);
+ regmap_write(dev->hw->mirror, dev->hw->index * 4, val);
+ } else {
+		/* the driver sets WPDMA MID ready, and only once */
+ wed_w32(dev, MTK_WED_EXT_INT_MASK1,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+ wed_w32(dev, MTK_WED_EXT_INT_MASK2,
+ MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY);
+
+ wed_r32(dev, MTK_WED_EXT_INT_MASK1);
+ wed_r32(dev, MTK_WED_EXT_INT_MASK2);
+
+ if (mtk_wed_rro_cfg(dev))
+ return;
+ }
+
+ mtk_wed_set_512_support(dev, dev->wlan.wcid_512);
+
+ mtk_wed_dma_enable(dev);
+ dev->running = true;
+}
+
+static int
+mtk_wed_attach(struct mtk_wed_device *dev)
+ __releases(RCU)
+{
+ struct mtk_wed_hw *hw;
+ struct device *device;
+ int ret = 0;
+
+ RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
+ "mtk_wed_attach without holding the RCU read lock");
+
+ if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE &&
+ pci_domain_nr(dev->wlan.pci_dev->bus) > 1) ||
+ !try_module_get(THIS_MODULE))
+ ret = -ENODEV;
+
+ rcu_read_unlock();
+
+ if (ret)
+ return ret;
+
+ mutex_lock(&hw_lock);
+
+ hw = mtk_wed_assign(dev);
+ if (!hw) {
+ module_put(THIS_MODULE);
+ ret = -ENODEV;
+ goto unlock;
+ }
+
+ device = dev->wlan.bus_type == MTK_WED_BUS_PCIE
+ ? &dev->wlan.pci_dev->dev
+ : &dev->wlan.platform_dev->dev;
+ dev_info(device, "attaching wed device %d version %d\n",
+ hw->index, hw->version);
+
+ dev->hw = hw;
+ dev->dev = hw->dev;
+ dev->irq = hw->irq;
+ dev->wdma_idx = hw->index;
+ dev->version = hw->version;
+
+ if (hw->eth->dma_dev == hw->eth->dev &&
+ of_dma_is_coherent(hw->eth->dev->of_node))
+ mtk_eth_set_dma_device(hw->eth, hw->dev);
+
+ ret = mtk_wed_tx_buffer_alloc(dev);
+ if (ret)
+ goto out;
+
+ if (mtk_wed_get_rx_capa(dev)) {
+ ret = mtk_wed_rro_alloc(dev);
+ if (ret)
+ goto out;
+ }
+
+ mtk_wed_hw_init_early(dev);
+ if (hw->version == 1) {
+ regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
+ BIT(hw->index), 0);
+ } else {
+ dev->rev_id = wed_r32(dev, MTK_WED_REV_ID);
+ ret = mtk_wed_wo_init(hw);
+ }
+out:
+ if (ret) {
+ dev_err(dev->hw->dev, "failed to attach wed device\n");
+ __mtk_wed_detach(dev);
+ }
+unlock:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+static int
+mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
+{
+ struct mtk_wed_ring *ring = &dev->tx_ring[idx];
+
+ /*
+ * Tx ring redirection:
+ * Instead of configuring the WLAN PDMA TX ring directly, the WLAN
+ * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n)
+ * registers.
+ *
+ * WED driver posts its own DMA ring as WLAN PDMA TX and configures it
+ * into MTK_WED_WPDMA_RING_TX(n) registers.
+ * It gets filled with packets picked up from WED TX ring and from
+ * WDMA RX.
+ */
+
+ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring)))
+ return -EINVAL;
+
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE,
+ sizeof(*ring->desc), true))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_TX(idx);
+ ring->wpdma = regs;
+
+ /* WED -> WPDMA */
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE);
+ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_TX_RING_SIZE);
+ wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0);
+
+ return 0;
+}
+
+static int
+mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs)
+{
+ struct mtk_wed_ring *ring = &dev->txfree_ring;
+ int i, index = dev->hw->version == 1;
+
+ /*
+ * For txfree event handling, the same DMA ring is shared between WED
+ * and WLAN. The WLAN driver accesses the ring index registers through
+ * WED. On WED v1 the ring is exposed as RX ring 1; later versions use ring 0.
+ */
+ ring->reg_base = MTK_WED_RING_RX(index);
+ ring->wpdma = regs;
+
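+ /* mirror the WLAN ring base, count and cpu index registers into WED */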
+ for (i = 0; i < 12; i += 4) {
+ u32 val = readl(regs + i);
+
+ wed_w32(dev, MTK_WED_RING_RX(index) + i, val);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val);
+ }
+
+ return 0;
+}
+
+static int
+mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs,
+ bool reset)
+{
+ struct mtk_wed_ring *ring = &dev->rx_ring[idx];
+
+ if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring)))
+ return -EINVAL;
+
+ if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE,
+ sizeof(*ring->desc), false))
+ return -ENOMEM;
+
+ if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE,
+ reset))
+ return -ENOMEM;
+
+ ring->reg_base = MTK_WED_RING_RX_DATA(idx);
+ ring->wpdma = regs;
+ ring->flags |= MTK_WED_RING_CONFIGURED;
+
+ /* WPDMA -> WED */
+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys);
+ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE);
+
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE,
+ ring->desc_phys);
+ wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT,
+ MTK_WED_RX_RING_SIZE);
+
+ return 0;
+}
+
+static u32
+mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask)
+{
+ u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;
+
+ if (dev->hw->version == 1)
+ ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
+ else
+ ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
+ MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
+ MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
+ MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
+
+ val = wed_r32(dev, MTK_WED_EXT_INT_STATUS);
+ wed_w32(dev, MTK_WED_EXT_INT_STATUS, val);
+ val &= ext_mask;
+ if (!dev->hw->num_flows)
+ val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
+ if (val && net_ratelimit())
+ pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val);
+
+ val = wed_r32(dev, MTK_WED_INT_STATUS);
+ val &= mask;
+ wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */
+
+ return val;
+}
+
+static void
+mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask)
+{
+ if (!dev->running)
+ return;
+
+ mtk_wed_set_ext_int(dev, !!mask);
+ wed_w32(dev, MTK_WED_INT_MASK, mask);
+}
+
+int mtk_wed_flow_add(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+ int ret;
+
+ if (!hw || !hw->wed_dev)
+ return -ENODEV;
+
+ if (hw->num_flows) {
+ hw->num_flows++;
+ return 0;
+ }
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
+ if (!ret)
+ hw->num_flows++;
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+
+ return ret;
+}
+
+void mtk_wed_flow_remove(int index)
+{
+ struct mtk_wed_hw *hw = hw_list[index];
+
+ if (!hw)
+ return;
+
+ if (--hw->num_flows)
+ return;
+
+ mutex_lock(&hw_lock);
+ if (!hw->wed_dev)
+ goto out;
+
+ hw->wed_dev->wlan.offload_disable(hw->wed_dev);
+ mtk_wed_set_ext_int(hw->wed_dev, true);
+
+out:
+ mutex_unlock(&hw_lock);
+}
+
+static int
+mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct mtk_wed_flow_block_priv *priv = cb_priv;
+ struct flow_cls_offload *cls = type_data;
+ struct mtk_wed_hw *hw = priv->hw;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+
+ if (type != TC_SETUP_CLSFLOWER)
+ return -EOPNOTSUPP;
+
+ return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
+}
+
+static int
+mtk_wed_setup_tc_block(struct mtk_wed_hw *hw, struct net_device *dev,
+ struct flow_block_offload *f)
+{
+ struct mtk_wed_flow_block_priv *priv;
+ static LIST_HEAD(block_cb_list);
+ struct flow_block_cb *block_cb;
+ struct mtk_eth *eth = hw->eth;
+ flow_setup_cb_t *cb;
+
+ if (!eth->soc->offload_version)
+ return -EOPNOTSUPP;
+
+ if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ cb = mtk_wed_setup_tc_block_cb;
+ f->driver_block_list = &block_cb_list;
+
+ switch (f->command) {
+ case FLOW_BLOCK_BIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (block_cb) {
+ flow_block_cb_incref(block_cb);
+ return 0;
+ }
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->hw = hw;
+ priv->dev = dev;
+ block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
+ if (IS_ERR(block_cb)) {
+ kfree(priv);
+ return PTR_ERR(block_cb);
+ }
+
+ flow_block_cb_incref(block_cb);
+ flow_block_cb_add(block_cb, f);
+ list_add_tail(&block_cb->driver_list, &block_cb_list);
+ return 0;
+ case FLOW_BLOCK_UNBIND:
+ block_cb = flow_block_cb_lookup(f->block, cb, dev);
+ if (!block_cb)
+ return -ENOENT;
+
+ if (!flow_block_cb_decref(block_cb)) {
+ flow_block_cb_remove(block_cb, f);
+ list_del(&block_cb->driver_list);
+ kfree(block_cb->cb_priv);
+ }
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
+ enum tc_setup_type type, void *type_data)
+{
+ struct mtk_wed_hw *hw = wed->hw;
+
+ if (hw->version < 2)
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ case TC_SETUP_FT:
+ return mtk_wed_setup_tc_block(hw, dev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+{
+ static const struct mtk_wed_ops wed_ops = {
+ .attach = mtk_wed_attach,
+ .tx_ring_setup = mtk_wed_tx_ring_setup,
+ .rx_ring_setup = mtk_wed_rx_ring_setup,
+ .txfree_ring_setup = mtk_wed_txfree_ring_setup,
+ .msg_update = mtk_wed_mcu_msg_update,
+ .start = mtk_wed_start,
+ .stop = mtk_wed_stop,
+ .reset_dma = mtk_wed_reset_dma,
+ .reg_read = wed_r32,
+ .reg_write = wed_w32,
+ .irq_get = mtk_wed_irq_get,
+ .irq_set_mask = mtk_wed_irq_set_mask,
+ .detach = mtk_wed_detach,
+ .ppe_check = mtk_wed_ppe_check,
+ .setup_tc = mtk_wed_setup_tc,
+ };
+ struct device_node *eth_np = eth->dev->of_node;
+ struct platform_device *pdev;
+ struct mtk_wed_hw *hw;
+ struct regmap *regs;
+ int irq;
+
+ if (!np)
+ return;
+
+ pdev = of_find_device_by_node(np);
+ if (!pdev)
+ goto err_of_node_put;
+
+ get_device(&pdev->dev);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ goto err_put_device;
+
+ regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(regs))
+ goto err_put_device;
+
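+ /* publish the wed ops so WLAN drivers can attach */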
+ rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);
+
+ mutex_lock(&hw_lock);
+
+ if (WARN_ON(hw_list[index]))
+ goto unlock;
+
+ hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+ if (!hw)
+ goto unlock;
+
+ hw->node = np;
+ hw->regs = regs;
+ hw->eth = eth;
+ hw->dev = &pdev->dev;
+ hw->wdma_phy = wdma_phy;
+ hw->wdma = wdma;
+ hw->index = index;
+ hw->irq = irq;
+ hw->version = mtk_is_netsys_v1(eth) ? 1 : 2;
+
+ if (hw->version == 1) {
+ hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,pcie-mirror");
+ hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
+ "mediatek,hifsys");
+ if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
+ kfree(hw);
+ goto unlock;
+ }
+
+ if (!index) {
+ regmap_write(hw->mirror, 0, 0);
+ regmap_write(hw->mirror, 4, 0);
+ }
+ }
+
+ mtk_wed_hw_add_debugfs(hw);
+
+ hw_list[index] = hw;
+
+ mutex_unlock(&hw_lock);
+
+ return;
+
+unlock:
+ mutex_unlock(&hw_lock);
+err_put_device:
+ put_device(&pdev->dev);
+err_of_node_put:
+ of_node_put(np);
+}
+
+void mtk_wed_exit(void)
+{
+ int i;
+
+ rcu_assign_pointer(mtk_soc_wed_ops, NULL);
+
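+ /* wait for in-flight readers of mtk_soc_wed_ops to finish */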
+ synchronize_rcu();
+
+ for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
+ struct mtk_wed_hw *hw;
+
+ hw = hw_list[i];
+ if (!hw)
+ continue;
+
+ hw_list[i] = NULL;
+ debugfs_remove(hw->debugfs_dir);
+ put_device(hw->dev);
+ of_node_put(hw->node);
+ kfree(hw);
+ }
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h
new file mode 100644
index 0000000000..43ab77eaf6
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed.h
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_PRIV_H
+#define __MTK_WED_PRIV_H
+
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <linux/debugfs.h>
+#include <linux/regmap.h>
+#include <linux/netdevice.h>
+
+struct mtk_eth;
+struct mtk_wed_wo;
+
+struct mtk_wed_hw {
+ struct device_node *node;
+ struct mtk_eth *eth;
+ struct regmap *regs;
+ struct regmap *hifsys;
+ struct device *dev;
+ void __iomem *wdma;
+ phys_addr_t wdma_phy;
+ struct regmap *mirror;
+ struct dentry *debugfs_dir;
+ struct mtk_wed_device *wed_dev;
+ struct mtk_wed_wo *wed_wo;
+ u32 debugfs_reg;
+ u32 num_flows;
+ u8 version;
+ char dirname[5];
+ int irq;
+ int index;
+};
+
+struct mtk_wdma_info {
+ u8 wdma_idx;
+ u8 queue;
+ u16 wcid;
+ u8 bss;
+};
+
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+static inline void
+wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ regmap_write(dev->hw->regs, reg, val);
+}
+
+static inline u32
+wed_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ unsigned int val;
+
+ regmap_read(dev->hw->regs, reg, &val);
+
+ return val;
+}
+
+static inline void
+wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ writel(val, dev->hw->wdma + reg);
+}
+
+static inline u32
+wdma_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ return readl(dev->hw->wdma + reg);
+}
+
+static inline u32
+wpdma_tx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->tx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->tx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_rx_r32(struct mtk_wed_device *dev, int ring, u32 reg)
+{
+ if (!dev->rx_ring[ring].wpdma)
+ return 0;
+
+ return readl(dev->rx_ring[ring].wpdma + reg);
+}
+
+static inline void
+wpdma_rx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val)
+{
+ if (!dev->rx_ring[ring].wpdma)
+ return;
+
+ writel(val, dev->rx_ring[ring].wpdma + reg);
+}
+
+static inline u32
+wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg)
+{
+ if (!dev->txfree_ring.wpdma)
+ return 0;
+
+ return readl(dev->txfree_ring.wpdma + reg);
+}
+
+static inline void
+wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val)
+{
+ if (!dev->txfree_ring.wpdma)
+ return;
+
+ writel(val, dev->txfree_ring.wpdma + reg);
+}
+
+void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index);
+void mtk_wed_exit(void);
+int mtk_wed_flow_add(int index);
+void mtk_wed_flow_remove(int index);
+void mtk_wed_fe_reset(void);
+void mtk_wed_fe_reset_complete(void);
+#else
+static inline void
+mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
+ void __iomem *wdma, phys_addr_t wdma_phy,
+ int index)
+{
+}
+static inline void
+mtk_wed_exit(void)
+{
+}
+static inline int mtk_wed_flow_add(int index)
+{
+ return -EINVAL;
+}
+static inline void mtk_wed_flow_remove(int index)
+{
+}
+
+static inline void mtk_wed_fe_reset(void)
+{
+}
+
+static inline void mtk_wed_fe_reset_complete(void)
+{
+}
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw);
+#else
+static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+}
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
new file mode 100644
index 0000000000..e24afeaea0
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c
@@ -0,0 +1,267 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/seq_file.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+
+struct reg_dump {
+ const char *name;
+ u16 offset;
+ u8 type;
+ u8 base;
+};
+
+enum {
+ DUMP_TYPE_STRING,
+ DUMP_TYPE_WED,
+ DUMP_TYPE_WDMA,
+ DUMP_TYPE_WPDMA_TX,
+ DUMP_TYPE_WPDMA_TXFREE,
+ DUMP_TYPE_WPDMA_RX,
+ DUMP_TYPE_WED_RRO,
+};
+
+#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING }
+#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ }
+#define DUMP_RING(_prefix, _base, ...) \
+ { _prefix " BASE", _base, __VA_ARGS__ }, \
+ { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \
+ { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \
+ { _prefix " DIDX", _base + 0xc, __VA_ARGS__ }
+
+#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED)
+#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED)
+
+#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA)
+#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA)
+
+#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n)
+#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE)
+#define DUMP_WPDMA_RX_RING(_n) DUMP_RING("WPDMA_RX" #_n, 0, DUMP_TYPE_WPDMA_RX, _n)
+#define DUMP_WED_RRO_RING(_base) DUMP_RING("WED_RRO_MIOD", MTK_##_base, DUMP_TYPE_WED_RRO)
+#define DUMP_WED_RRO_FDBK(_base) DUMP_RING("WED_RRO_FDBK", MTK_##_base, DUMP_TYPE_WED_RRO)
+
+static void
+print_reg_val(struct seq_file *s, const char *name, u32 val)
+{
+ seq_printf(s, "%-32s %08x\n", name, val);
+}
+
+static void
+dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev,
+ const struct reg_dump *regs, int n_regs)
+{
+ const struct reg_dump *cur;
+ u32 val;
+
+ for (cur = regs; cur < &regs[n_regs]; cur++) {
+ switch (cur->type) {
+ case DUMP_TYPE_STRING:
+ seq_printf(s, "%s======== %s:\n",
+ cur > regs ? "\n" : "",
+ cur->name);
+ continue;
+ case DUMP_TYPE_WED_RRO:
+ case DUMP_TYPE_WED:
+ val = wed_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WDMA:
+ val = wdma_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TX:
+ val = wpdma_tx_r32(dev, cur->base, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_TXFREE:
+ val = wpdma_txfree_r32(dev, cur->offset);
+ break;
+ case DUMP_TYPE_WPDMA_RX:
+ val = wpdma_rx_r32(dev, cur->base, cur->offset);
+ break;
+ }
+ print_reg_val(s, cur->name, val);
+ }
+}
+
+static int
+wed_txinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WED TX"),
+ DUMP_WED(WED_TX_MIB(0)),
+ DUMP_WED_RING(WED_RING_TX(0)),
+
+ DUMP_WED(WED_TX_MIB(1)),
+ DUMP_WED_RING(WED_RING_TX(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WED(WED_WPDMA_TX_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(0)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)),
+
+ DUMP_WED(WED_WPDMA_TX_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_TX(1)),
+ DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)),
+
+ DUMP_STR("WPDMA TX"),
+ DUMP_WPDMA_TX_RING(0),
+ DUMP_WPDMA_TX_RING(1),
+
+ DUMP_STR("WED WDMA RX"),
+ DUMP_WED(WED_WDMA_RX_MIB(0)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(0)),
+ DUMP_WED(WED_WDMA_RX_THRES(0)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)),
+
+ DUMP_WED(WED_WDMA_RX_MIB(1)),
+ DUMP_WED_RING(WED_WDMA_RING_RX(1)),
+ DUMP_WED(WED_WDMA_RX_THRES(1)),
+ DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)),
+ DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)),
+
+ DUMP_STR("WDMA RX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_RX(0)),
+ DUMP_WDMA_RING(WDMA_RING_RX(1)),
+
+ DUMP_STR("WED TX FREE"),
+ DUMP_WED(WED_RX_MIB(0)),
+ DUMP_WED_RING(WED_RING_RX(0)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(0)),
+ DUMP_WED(WED_RX_MIB(1)),
+ DUMP_WED_RING(WED_RING_RX(1)),
+ DUMP_WED(WED_WPDMA_RX_COHERENT_MIB(1)),
+
+ DUMP_STR("WED WPDMA TX FREE"),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX(1)),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_txinfo);
+
+static int
+wed_rxinfo_show(struct seq_file *s, void *data)
+{
+ static const struct reg_dump regs[] = {
+ DUMP_STR("WPDMA RX"),
+ DUMP_WPDMA_RX_RING(0),
+ DUMP_WPDMA_RX_RING(1),
+
+ DUMP_STR("WPDMA RX"),
+ DUMP_WED(WED_WPDMA_RX_D_MIB(0)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(0)),
+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(0)),
+ DUMP_WED(WED_WPDMA_RX_D_MIB(1)),
+ DUMP_WED_RING(WED_WPDMA_RING_RX_DATA(1)),
+ DUMP_WED(WED_WPDMA_RX_D_PROCESSED_MIB(1)),
+ DUMP_WED(WED_WPDMA_RX_D_COHERENT_MIB),
+
+ DUMP_STR("WED RX"),
+ DUMP_WED_RING(WED_RING_RX_DATA(0)),
+ DUMP_WED_RING(WED_RING_RX_DATA(1)),
+
+ DUMP_STR("WED RRO"),
+ DUMP_WED_RRO_RING(WED_RROQM_MIOD_CTRL0),
+ DUMP_WED(WED_RROQM_MID_MIB),
+ DUMP_WED(WED_RROQM_MOD_MIB),
+ DUMP_WED(WED_RROQM_MOD_COHERENT_MIB),
+ DUMP_WED_RRO_FDBK(WED_RROQM_FDBK_CTRL0),
+ DUMP_WED(WED_RROQM_FDBK_IND_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ENQ_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC_MIB),
+ DUMP_WED(WED_RROQM_FDBK_ANC2H_MIB),
+
+ DUMP_STR("WED Route QM"),
+ DUMP_WED(WED_RTQM_R2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(0)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(0)),
+ DUMP_WED(WED_RTQM_R2H_MIB(1)),
+ DUMP_WED(WED_RTQM_R2Q_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2H_MIB(1)),
+ DUMP_WED(WED_RTQM_Q2N_MIB),
+ DUMP_WED(WED_RTQM_Q2B_MIB),
+ DUMP_WED(WED_RTQM_PFDBK_MIB),
+
+ DUMP_STR("WED WDMA TX"),
+ DUMP_WED(WED_WDMA_TX_MIB),
+ DUMP_WED_RING(WED_WDMA_RING_TX),
+
+ DUMP_STR("WDMA TX"),
+ DUMP_WDMA(WDMA_GLO_CFG),
+ DUMP_WDMA_RING(WDMA_RING_TX(0)),
+ DUMP_WDMA_RING(WDMA_RING_TX(1)),
+
+ DUMP_STR("WED RX BM"),
+ DUMP_WED(WED_RX_BM_BASE),
+ DUMP_WED(WED_RX_BM_RX_DMAD),
+ DUMP_WED(WED_RX_BM_PTR),
+ DUMP_WED(WED_RX_BM_TKID_MIB),
+ DUMP_WED(WED_RX_BM_BLEN),
+ DUMP_WED(WED_RX_BM_STS),
+ DUMP_WED(WED_RX_BM_INTF2),
+ DUMP_WED(WED_RX_BM_INTF),
+ DUMP_WED(WED_RX_BM_ERR_STS),
+ };
+ struct mtk_wed_hw *hw = s->private;
+ struct mtk_wed_device *dev = hw->wed_dev;
+
+ if (dev)
+ dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs));
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(wed_rxinfo);
+
+static int
+mtk_wed_reg_set(void *data, u64 val)
+{
+ struct mtk_wed_hw *hw = data;
+
+ regmap_write(hw->regs, hw->debugfs_reg, val);
+
+ return 0;
+}
+
+static int
+mtk_wed_reg_get(void *data, u64 *val)
+{
+ struct mtk_wed_hw *hw = data;
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(hw->regs, hw->debugfs_reg, &regval);
+ if (ret)
+ return ret;
+
+ *val = regval;
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set,
+ "0x%08llx\n");
+
+void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw)
+{
+ struct dentry *dir;
+
+ snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index);
+ dir = debugfs_create_dir(hw->dirname, NULL);
+
+ hw->debugfs_dir = dir;
+ debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg);
+ debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval);
+ debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops);
+ if (hw->version != 1)
+ debugfs_create_file_unsafe("rxinfo", 0400, dir, hw,
+ &wed_rxinfo_fops);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_mcu.c b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
new file mode 100644
index 0000000000..071ed3dea8
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_mcu.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/firmware.h>
+#include <linux/of_address.h>
+#include <linux/of_reserved_mem.h>
+#include <linux/mfd/syscon.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+#include <asm/unaligned.h>
+
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+#include "mtk_wed.h"
+
+static u32 wo_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+ return readl(wo->boot.addr + reg);
+}
+
+static void wo_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+ writel(val, wo->boot.addr + reg);
+}
+
+static struct sk_buff *
+mtk_wed_mcu_msg_alloc(const void *data, int data_len)
+{
+ int length = sizeof(struct mtk_wed_mcu_hdr) + data_len;
+ struct sk_buff *skb;
+
+ skb = alloc_skb(length, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ memset(skb->head, 0, length);
+ skb_reserve(skb, sizeof(struct mtk_wed_mcu_hdr));
+ if (data && data_len)
+ skb_put_data(skb, data, data_len);
+
+ return skb;
+}
+
+static struct sk_buff *
+mtk_wed_mcu_get_response(struct mtk_wed_wo *wo, unsigned long expires)
+{
+ if (!time_is_after_jiffies(expires))
+ return NULL;
+
+ wait_event_timeout(wo->mcu.wait, !skb_queue_empty(&wo->mcu.res_q),
+ expires - jiffies);
+ return skb_dequeue(&wo->mcu.res_q);
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+ skb_queue_tail(&wo->mcu.res_q, skb);
+ wake_up(&wo->mcu.wait);
+}
+
+static void
+mtk_wed_update_rx_stats(struct mtk_wed_device *wed, struct sk_buff *skb)
+{
+ u32 count = get_unaligned_le32(skb->data);
+ struct mtk_wed_wo_rx_stats *stats;
+ int i;
+
+ if (count * sizeof(*stats) > skb->len - sizeof(u32))
+ return;
+
+ stats = (struct mtk_wed_wo_rx_stats *)(skb->data + sizeof(u32));
+ for (i = 0 ; i < count ; i++)
+ wed->wlan.update_wo_rx_stats(wed, &stats[i]);
+}
+
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+ struct sk_buff *skb)
+{
+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+ skb_pull(skb, sizeof(*hdr));
+
+ switch (hdr->cmd) {
+ case MTK_WED_WO_EVT_LOG_DUMP:
+ dev_notice(wo->hw->dev, "%s\n", skb->data);
+ break;
+ case MTK_WED_WO_EVT_PROFILING: {
+ struct mtk_wed_wo_log_info *info = (void *)skb->data;
+ u32 count = skb->len / sizeof(*info);
+ int i;
+
+ for (i = 0 ; i < count ; i++)
+ dev_notice(wo->hw->dev,
+ "SN:%u latency: total=%u, rro:%u, mod:%u\n",
+ le32_to_cpu(info[i].sn),
+ le32_to_cpu(info[i].total),
+ le32_to_cpu(info[i].rro),
+ le32_to_cpu(info[i].mod));
+ break;
+ }
+ case MTK_WED_WO_EVT_RXCNT_INFO:
+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
+ break;
+ default:
+ break;
+ }
+
+ dev_kfree_skb(skb);
+}
+
+static int
+mtk_wed_mcu_skb_send_msg(struct mtk_wed_wo *wo, struct sk_buff *skb,
+ int id, int cmd, u16 *wait_seq, bool wait_resp)
+{
+ struct mtk_wed_mcu_hdr *hdr;
+
+ /* TODO: make it dynamic based on cmd */
+ wo->mcu.timeout = 20 * HZ;
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb_push(skb, sizeof(*hdr));
+ hdr->cmd = cmd;
+ hdr->length = cpu_to_le16(skb->len);
+
+ if (wait_resp && wait_seq) {
+ u16 seq = ++wo->mcu.seq;
+
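+ /* sequence number 0 is reserved; skip it on wrap-around */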
+ if (!seq)
+ seq = ++wo->mcu.seq;
+ *wait_seq = seq;
+
+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_NEED_RSP);
+ hdr->seq = cpu_to_le16(seq);
+ }
+ if (id == MTK_WED_MODULE_ID_WO)
+ hdr->flag |= cpu_to_le16(MTK_WED_WARP_CMD_FLAG_FROM_TO_WO);
+
+ return mtk_wed_wo_queue_tx_skb(wo, &wo->q_tx, skb);
+}
+
+static int
+mtk_wed_mcu_parse_response(struct mtk_wed_wo *wo, struct sk_buff *skb,
+ int cmd, int seq)
+{
+ struct mtk_wed_mcu_hdr *hdr;
+
+ if (!skb) {
+ dev_err(wo->hw->dev, "Message %08x (seq %d) timeout\n",
+ cmd, seq);
+ return -ETIMEDOUT;
+ }
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+ if (le16_to_cpu(hdr->seq) != seq)
+ return -EAGAIN;
+
+ skb_pull(skb, sizeof(*hdr));
+ switch (cmd) {
+ case MTK_WED_WO_CMD_RXCNT_INFO:
+ mtk_wed_update_rx_stats(wo->hw->wed_dev, skb);
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+ const void *data, int len, bool wait_resp)
+{
+ unsigned long expires;
+ struct sk_buff *skb;
+ u16 seq;
+ int ret;
+
+ skb = mtk_wed_mcu_msg_alloc(data, len);
+ if (!skb)
+ return -ENOMEM;
+
+ mutex_lock(&wo->mcu.mutex);
+
+ ret = mtk_wed_mcu_skb_send_msg(wo, skb, id, cmd, &seq, wait_resp);
+ if (ret || !wait_resp)
+ goto unlock;
+
+ expires = jiffies + wo->mcu.timeout;
+ do {
+ skb = mtk_wed_mcu_get_response(wo, expires);
+ ret = mtk_wed_mcu_parse_response(wo, skb, cmd, seq);
+ dev_kfree_skb(skb);
+ } while (ret == -EAGAIN);
+
+unlock:
+ mutex_unlock(&wo->mcu.mutex);
+
+ return ret;
+}
+
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+ int len)
+{
+ struct mtk_wed_wo *wo = dev->hw->wed_wo;
+
+ if (dev->hw->version == 1)
+ return 0;
+
+ if (WARN_ON(!wo))
+ return -ENODEV;
+
+ return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO, id, data, len,
+ true);
+}
+
+static int
+mtk_wed_get_memory_region(struct mtk_wed_wo *wo,
+ struct mtk_wed_wo_memory_region *region)
+{
+ struct reserved_mem *rmem;
+ struct device_node *np;
+ int index;
+
+ index = of_property_match_string(wo->hw->node, "memory-region-names",
+ region->name);
+ if (index < 0)
+ return index;
+
+ np = of_parse_phandle(wo->hw->node, "memory-region", index);
+ if (!np)
+ return -ENODEV;
+
+ rmem = of_reserved_mem_lookup(np);
+ of_node_put(np);
+
+ if (!rmem)
+ return -ENODEV;
+
+ region->phy_addr = rmem->base;
+ region->size = rmem->size;
+ region->addr = devm_ioremap(wo->hw->dev, region->phy_addr, region->size);
+
+ return !region->addr ? -EINVAL : 0;
+}
+
+static int
+mtk_wed_mcu_run_firmware(struct mtk_wed_wo *wo, const struct firmware *fw,
+ struct mtk_wed_wo_memory_region *region)
+{
+ const u8 *first_region_ptr, *region_ptr, *trailer_ptr, *ptr = fw->data;
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct mtk_wed_fw_region *fw_region;
+
+ trailer_ptr = fw->data + fw->size - sizeof(*trailer);
+ trailer = (const struct mtk_wed_fw_trailer *)trailer_ptr;
+ region_ptr = trailer_ptr - trailer->num_region * sizeof(*fw_region);
+ first_region_ptr = region_ptr;
+
+ while (region_ptr < trailer_ptr) {
+ u32 length;
+
+ fw_region = (const struct mtk_wed_fw_region *)region_ptr;
+ length = le32_to_cpu(fw_region->len);
+
+ if (region->phy_addr != le32_to_cpu(fw_region->addr))
+ goto next;
+
+ if (region->size < length)
+ goto next;
+
+ if (first_region_ptr < ptr + length)
+ goto next;
+
+ if (region->shared && region->consumed)
+ return 0;
+
+ if (!region->shared || !region->consumed) {
+ memcpy_toio(region->addr, ptr, length);
+ region->consumed = true;
+ return 0;
+ }
+next:
+ region_ptr += sizeof(*fw_region);
+ ptr += length;
+ }
+
+ return -EINVAL;
+}
+
+static int
+mtk_wed_mcu_load_firmware(struct mtk_wed_wo *wo)
+{
+ static struct mtk_wed_wo_memory_region mem_region[] = {
+ [MTK_WED_WO_REGION_EMI] = {
+ .name = "wo-emi",
+ },
+ [MTK_WED_WO_REGION_ILM] = {
+ .name = "wo-ilm",
+ },
+ [MTK_WED_WO_REGION_DATA] = {
+ .name = "wo-data",
+ .shared = true,
+ },
+ };
+ const struct mtk_wed_fw_trailer *trailer;
+ const struct firmware *fw;
+ const char *fw_name;
+ u32 val, boot_cr;
+ int ret, i;
+
+ /* load firmware region metadata */
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ ret = mtk_wed_get_memory_region(wo, &mem_region[i]);
+ if (ret)
+ return ret;
+ }
+
+ wo->boot.name = "wo-boot";
+ ret = mtk_wed_get_memory_region(wo, &wo->boot);
+ if (ret)
+ return ret;
+
+ /* set the firmware download dummy cr; it is polled until cleared once the WO boots */
+ wed_w32(wo->hw->wed_dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL,
+ wo->hw->index + 1);
+
+ /* load firmware */
+ if (of_device_is_compatible(wo->hw->node, "mediatek,mt7981-wed"))
+ fw_name = MT7981_FIRMWARE_WO;
+ else
+ fw_name = wo->hw->index ? MT7986_FIRMWARE_WO1 : MT7986_FIRMWARE_WO0;
+
+ ret = request_firmware(&fw, fw_name, wo->hw->dev);
+ if (ret)
+ return ret;
+
+ trailer = (void *)(fw->data + fw->size -
+ sizeof(struct mtk_wed_fw_trailer));
+ dev_info(wo->hw->dev,
+ "MTK WED WO Firmware Version: %.10s, Build Time: %.15s\n",
+ trailer->fw_ver, trailer->build_date);
+ dev_info(wo->hw->dev, "MTK WED WO Chip ID %02x Region %d\n",
+ trailer->chip_id, trailer->num_region);
+
+ for (i = 0; i < ARRAY_SIZE(mem_region); i++) {
+ ret = mtk_wed_mcu_run_firmware(wo, fw, &mem_region[i]);
+ if (ret)
+ goto out;
+ }
+
+ /* set the start address */
+ boot_cr = wo->hw->index ? MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR
+ : MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR;
+ wo_w32(wo, boot_cr, mem_region[MTK_WED_WO_REGION_EMI].phy_addr >> 16);
+ /* wo firmware reset */
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR, 0xc00);
+
+ val = wo_r32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR);
+ val |= wo->hw->index ? MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK
+ : MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK;
+ wo_w32(wo, MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR, val);
+out:
+ release_firmware(fw);
+
+ return ret;
+}
+
+static u32
+mtk_wed_mcu_read_fw_dl(struct mtk_wed_wo *wo)
+{
+ return wed_r32(wo->hw->wed_dev,
+ MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_FWDL);
+}
+
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo)
+{
+ u32 val;
+ int ret;
+
+ skb_queue_head_init(&wo->mcu.res_q);
+ init_waitqueue_head(&wo->mcu.wait);
+ mutex_init(&wo->mcu.mutex);
+
+ ret = mtk_wed_mcu_load_firmware(wo);
+ if (ret)
+ return ret;
+
+ return readx_poll_timeout(mtk_wed_mcu_read_fw_dl, wo, val, !val,
+ 100, MTK_FW_DL_TIMEOUT);
+}
+
+MODULE_FIRMWARE(MT7981_FIRMWARE_WO);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO0);
+MODULE_FIRMWARE(MT7986_FIRMWARE_WO1);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
new file mode 100644
index 0000000000..a5d9d8a5bc
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#include <linux/kernel.h>
+#include <linux/soc/mediatek/mtk_wed.h>
+
+const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
+EXPORT_SYMBOL_GPL(mtk_soc_wed_ops);
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
new file mode 100644
index 0000000000..f87ab9b8a5
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */
+
+#ifndef __MTK_WED_REGS_H
+#define __MTK_WED_REGS_H
+
+#define MTK_WFDMA_DESC_CTRL_TO_HOST BIT(8)
+#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0)
+#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15)
+#define MTK_WDMA_DESC_CTRL_BURST BIT(16)
+#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16)
+#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30)
+#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31)
+
+struct mtk_wdma_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+} __packed __aligned(4);
+
+#define MTK_WED_REV_ID 0x004
+
+#define MTK_WED_RESET 0x008
+#define MTK_WED_RESET_TX_BM BIT(0)
+#define MTK_WED_RESET_RX_BM BIT(1)
+#define MTK_WED_RESET_TX_FREE_AGENT BIT(4)
+#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8)
+#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9)
+#define MTK_WED_RESET_WPDMA_RX_D_DRV BIT(10)
+#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11)
+#define MTK_WED_RESET_WED_TX_DMA BIT(12)
+#define MTK_WED_RESET_WED_RX_DMA BIT(13)
+#define MTK_WED_RESET_WDMA_TX_DRV BIT(16)
+#define MTK_WED_RESET_WDMA_RX_DRV BIT(17)
+#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19)
+#define MTK_WED_RESET_RX_RRO_QM BIT(20)
+#define MTK_WED_RESET_RX_ROUTE_QM BIT(21)
+#define MTK_WED_RESET_WED BIT(31)
+
+#define MTK_WED_CTRL 0x00c
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0)
+#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2)
+#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3)
+#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8)
+#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10)
+#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11)
+#define MTK_WED_CTRL_WED_RX_BM_EN BIT(12)
+#define MTK_WED_CTRL_WED_RX_BM_BUSY BIT(13)
+#define MTK_WED_CTRL_RX_RRO_QM_EN BIT(14)
+#define MTK_WED_CTRL_RX_RRO_QM_BUSY BIT(15)
+#define MTK_WED_CTRL_RX_ROUTE_QM_EN BIT(16)
+#define MTK_WED_CTRL_RX_ROUTE_QM_BUSY BIT(17)
+#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24)
+#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25)
+#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28)
+
+#define MTK_WED_EXT_INT_STATUS 0x020
+#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0)
+#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1)
+#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8)
+#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9)
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */
+#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20)
+#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22)
+#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24)
+#define MTK_WED_EXT_INT_STATUS_RX_DRV_GET_BM_DMAD_SKIP BIT(25)
+#define MTK_WED_EXT_INT_STATUS_WPDMA_RX_D_DRV_ERR BIT(26)
+#define MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY BIT(27)
+#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \
+ MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \
+ MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \
+ MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \
+ MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR)
+
+#define MTK_WED_EXT_INT_MASK 0x028
+#define MTK_WED_EXT_INT_MASK1 0x02c
+#define MTK_WED_EXT_INT_MASK2 0x030
+
+#define MTK_WED_STATUS 0x060
+#define MTK_WED_STATUS_TX GENMASK(15, 8)
+
+#define MTK_WED_TX_BM_CTRL 0x080
+#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_BM_BASE 0x084
+
+#define MTK_WED_TX_BM_TKID 0x088
+#define MTK_WED_TX_BM_TKID_V2 0x0c8
+#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0)
+#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16)
+
+#define MTK_WED_TX_BM_BUF_LEN 0x08c
+
+#define MTK_WED_TX_BM_INTF 0x09c
+#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0)
+#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16)
+#define MTK_WED_TX_BM_INTF_TKID_VALID BIT(28)
+#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29)
+
+#define MTK_WED_TX_BM_DYN_THR 0x0a0
+#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0)
+#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16)
+#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16)
+
+#define MTK_WED_TX_TKID_CTRL 0x0c0
+#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0)
+#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16)
+#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28)
+
+#define MTK_WED_TX_TKID_DYN_THR 0x0e0
+#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0)
+#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16)
+
+#define MTK_WED_TXP_DW0 0x120
+#define MTK_WED_TXP_DW1 0x124
+#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16)
+#define MTK_WED_TXDP_CTRL 0x130
+#define MTK_WED_TXDP_DW9_OVERWR BIT(9)
+#define MTK_WED_RX_BM_TKID_MIB 0x1cc
+
+#define MTK_WED_INT_STATUS 0x200
+#define MTK_WED_INT_MASK 0x204
+
+#define MTK_WED_GLO_CFG 0x208
+#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+#define MTK_WED_RESET_IDX 0x20c
+#define MTK_WED_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_RESET_IDX_RX_V2 GENMASK(7, 6)
+#define MTK_WED_RESET_WPDMA_IDX_RX GENMASK(31, 30)
+
+#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4)
+#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4)
+
+#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10)
+
+#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10)
+#define MTK_WED_RING_RX_DATA(_n) (0x420 + (_n) * 0x10)
+
+#define MTK_WED_SCR0 0x3c0
+#define MTK_WED_WPDMA_INT_TRIGGER 0x504
+#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1)
+#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4)
+
+#define MTK_WED_WPDMA_GLO_CFG 0x508
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9)
+#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10)
+#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12)
+#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13)
+#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22)
+#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27)
+#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28)
+#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29)
+#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31)
+
+/* CONFIG_MEDIATEK_NETSYS_V2 */
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20)
+#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21)
+#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24)
+#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28)
+
+#define MTK_WED_WPDMA_RESET_IDX 0x50c
+#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_CTRL 0x518
+#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31)
+
+#define MTK_WED_WPDMA_INT_CTRL 0x520
+#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21)
+#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22)
+#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WPDMA_INT_MASK 0x524
+
+#define MTK_WED_WPDMA_INT_CTRL_TX 0x530
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_RX 0x534
+#define MTK_WED_WPDMA_INT_CTRL_RX0_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_RX0_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG GENMASK(6, 2)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_EN BIT(8)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_CLR BIT(9)
+#define MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG GENMASK(14, 10)
+
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1)
+#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2)
+
+#define MTK_WED_PCIE_CFG_BASE 0x560
+#define MTK_WED_PCIE_CFG_INTM 0x564
+#define MTK_WED_PCIE_CFG_MSIS 0x568
+#define MTK_WED_PCIE_INT_TRIGGER 0x570
+#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16)
+
+#define MTK_WED_PCIE_INT_CTRL 0x57c
+#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20)
+#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16)
+#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12)
+
+#define MTK_WED_WPDMA_CFG_BASE 0x580
+#define MTK_WED_WPDMA_CFG_INT_MASK 0x584
+#define MTK_WED_WPDMA_CFG_TX 0x588
+#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c
+
+#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4)
+#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_MIB(_n) (0x5e0 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_COHERENT_MIB(_n) (0x5f0 + (_n) * 4)
+
+#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10)
+#define MTK_WED_WPDMA_RING_RX_DATA(_n) (0x730 + (_n) * 0x10)
+
+#define MTK_WED_WPDMA_RX_D_GLO_CFG 0x75c
+#define MTK_WED_WPDMA_RX_D_RX_DRV_EN BIT(0)
+#define MTK_WED_WPDMA_RX_D_RX_DRV_BUSY BIT(1)
+#define MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE BIT(3)
+#define MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE BIT(4)
+#define MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL GENMASK(11, 7)
+#define MTK_WED_WPDMA_RX_D_RXD_READ_LEN GENMASK(31, 24)
+
+#define MTK_WED_WPDMA_RX_D_RST_IDX 0x760
+#define MTK_WED_WPDMA_RX_D_RST_CRX_IDX GENMASK(17, 16)
+#define MTK_WED_WPDMA_RX_D_RST_DRV_IDX GENMASK(25, 24)
+
+#define MTK_WED_WPDMA_RX_GLO_CFG 0x76c
+#define MTK_WED_WPDMA_RX_RING 0x770
+
+#define MTK_WED_WPDMA_RX_D_MIB(_n) (0x774 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_D_PROCESSED_MIB(_n) (0x784 + (_n) * 4)
+#define MTK_WED_WPDMA_RX_D_COHERENT_MIB 0x78c
+
+#define MTK_WED_WDMA_RING_TX 0x800
+
+#define MTK_WED_WDMA_TX_MIB 0x810
+
+#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10)
+#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4)
+
+#define MTK_WED_WDMA_GLO_CFG 0xa04
+#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0)
+#define MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK BIT(1)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2)
+#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3)
+#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4)
+#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6)
+#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13)
+#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17)
+#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18)
+#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19)
+#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20)
+#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22)
+#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24)
+#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25)
+#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26)
+#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30)
+
+#define MTK_WED_WDMA_RESET_IDX 0xa08
+#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16)
+#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24)
+
+#define MTK_WED_WDMA_INT_CLR 0xa24
+#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_TRIGGER 0xa28
+#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16)
+
+#define MTK_WED_WDMA_INT_CTRL 0xa2c
+#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16)
+
+#define MTK_WED_WDMA_CFG_BASE 0xaa0
+#define MTK_WED_WDMA_OFFSET0 0xaa4
+#define MTK_WED_WDMA_OFFSET1 0xaa8
+
+#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16)
+#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0)
+#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16)
+
+#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4)
+#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4)
+#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4)
+
+#define MTK_WED_RX_BM_RX_DMAD 0xd80
+#define MTK_WED_RX_BM_RX_DMAD_SDL0 GENMASK(13, 0)
+
+#define MTK_WED_RX_BM_BASE 0xd84
+#define MTK_WED_RX_BM_INIT_PTR 0xd88
+#define MTK_WED_RX_BM_SW_TAIL GENMASK(15, 0)
+#define MTK_WED_RX_BM_INIT_SW_TAIL BIT(16)
+
+#define MTK_WED_RX_PTR 0xd8c
+
+#define MTK_WED_RX_BM_DYN_ALLOC_TH 0xdb4
+#define MTK_WED_RX_BM_DYN_ALLOC_TH_H GENMASK(31, 16)
+#define MTK_WED_RX_BM_DYN_ALLOC_TH_L GENMASK(15, 0)
+
+#define MTK_WED_RING_OFS_BASE 0x00
+#define MTK_WED_RING_OFS_COUNT 0x04
+#define MTK_WED_RING_OFS_CPU_IDX 0x08
+#define MTK_WED_RING_OFS_DMA_IDX 0x0c
+
+#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10)
+#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10)
+
+#define MTK_WDMA_GLO_CFG 0x204
+#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0)
+#define MTK_WDMA_GLO_CFG_TX_DMA_BUSY BIT(1)
+#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2)
+#define MTK_WDMA_GLO_CFG_RX_DMA_BUSY BIT(3)
+#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26)
+#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27)
+#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28)
+
+#define MTK_WDMA_RESET_IDX 0x208
+#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0)
+#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16)
+
+#define MTK_WDMA_INT_STATUS 0x220
+
+#define MTK_WDMA_INT_MASK 0x228
+#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0)
+#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16)
+#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28)
+#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29)
+#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30)
+#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31)
+
+#define MTK_WDMA_INT_GRP1 0x250
+#define MTK_WDMA_INT_GRP2 0x254
+
+#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0)
+#define MTK_PCIE_MIRROR_MAP_EN BIT(0)
+#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1)
+
+/* DMA channel mapping */
+#define HIFSYS_DMA_AG_MAP 0x008
+
+#define MTK_WED_RTQM_GLO_CFG 0xb00
+#define MTK_WED_RTQM_BUSY BIT(1)
+#define MTK_WED_RTQM_Q_RST BIT(2)
+#define MTK_WED_RTQM_Q_DBG_BYPASS BIT(5)
+#define MTK_WED_RTQM_TXDMAD_FPORT GENMASK(23, 20)
+
+#define MTK_WED_RTQM_R2H_MIB(_n) (0xb70 + (_n) * 0x4)
+#define MTK_WED_RTQM_R2Q_MIB(_n) (0xb78 + (_n) * 0x4)
+#define MTK_WED_RTQM_Q2N_MIB 0xb80
+#define MTK_WED_RTQM_Q2H_MIB(_n) (0xb84 + (_n) * 0x4)
+
+#define MTK_WED_RTQM_Q2B_MIB 0xb8c
+#define MTK_WED_RTQM_PFDBK_MIB 0xb90
+
+#define MTK_WED_RROQM_GLO_CFG 0xc04
+#define MTK_WED_RROQM_RST_IDX 0xc08
+#define MTK_WED_RROQM_RST_IDX_MIOD BIT(0)
+#define MTK_WED_RROQM_RST_IDX_FDBK BIT(4)
+
+#define MTK_WED_RROQM_MIOD_CTRL0 0xc40
+#define MTK_WED_RROQM_MIOD_CTRL1 0xc44
+#define MTK_WED_RROQM_MIOD_CNT GENMASK(11, 0)
+
+#define MTK_WED_RROQM_MIOD_CTRL2 0xc48
+#define MTK_WED_RROQM_MIOD_CTRL3 0xc4c
+
+#define MTK_WED_RROQM_FDBK_CTRL0 0xc50
+#define MTK_WED_RROQM_FDBK_CTRL1 0xc54
+#define MTK_WED_RROQM_FDBK_CNT GENMASK(11, 0)
+
+#define MTK_WED_RROQM_FDBK_CTRL2 0xc58
+
+#define MTK_WED_RROQ_BASE_L 0xc80
+#define MTK_WED_RROQ_BASE_H 0xc84
+
+#define MTK_WED_RROQM_MIOD_CFG 0xc8c
+#define MTK_WED_RROQM_MIOD_MID_DW GENMASK(5, 0)
+#define MTK_WED_RROQM_MIOD_MOD_DW GENMASK(13, 8)
+#define MTK_WED_RROQM_MIOD_ENTRY_DW GENMASK(22, 16)
+
+#define MTK_WED_RROQM_MID_MIB 0xcc0
+#define MTK_WED_RROQM_MOD_MIB 0xcc4
+#define MTK_WED_RROQM_MOD_COHERENT_MIB 0xcc8
+#define MTK_WED_RROQM_FDBK_MIB 0xcd0
+#define MTK_WED_RROQM_FDBK_COHERENT_MIB 0xcd4
+#define MTK_WED_RROQM_FDBK_IND_MIB 0xce0
+#define MTK_WED_RROQM_FDBK_ENQ_MIB 0xce4
+#define MTK_WED_RROQM_FDBK_ANC_MIB 0xce8
+#define MTK_WED_RROQM_FDBK_ANC2H_MIB 0xcec
+
+#define MTK_WED_RX_BM_PTR 0xd8c
+#define MTK_WED_RX_BM_PTR_HEAD GENMASK(32, 16)
+#define MTK_WED_RX_BM_PTR_TAIL GENMASK(15, 0)
+
+#define MTK_WED_RX_BM_BLEN 0xd90
+#define MTK_WED_RX_BM_STS 0xd94
+#define MTK_WED_RX_BM_INTF2 0xd98
+#define MTK_WED_RX_BM_INTF 0xd9c
+#define MTK_WED_RX_BM_ERR_STS 0xda8
+
+#define MTK_WED_WOCPU_VIEW_MIOD_BASE 0x8000
+#define MTK_WED_PCIE_INT_MASK 0x0
+
+#endif
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
new file mode 100644
index 0000000000..ae44ad5f8c
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -0,0 +1,503 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (C) 2022 MediaTek Inc.
+ *
+ * Author: Lorenzo Bianconi <lorenzo@kernel.org>
+ * Sujuan Chen <sujuan.chen@mediatek.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/bitfield.h>
+
+#include "mtk_wed.h"
+#include "mtk_wed_regs.h"
+#include "mtk_wed_wo.h"
+
+static u32
+mtk_wed_mmio_r32(struct mtk_wed_wo *wo, u32 reg)
+{
+ u32 val;
+
+ if (regmap_read(wo->mmio.regs, reg, &val))
+ val = ~0;
+
+ return val;
+}
+
+static void
+mtk_wed_mmio_w32(struct mtk_wed_wo *wo, u32 reg, u32 val)
+{
+ regmap_write(wo->mmio.regs, reg, val);
+}
+
+static u32
+mtk_wed_wo_get_isr(struct mtk_wed_wo *wo)
+{
+ u32 val = mtk_wed_mmio_r32(wo, MTK_WED_WO_CCIF_RCHNUM);
+
+ return val & MTK_WED_WO_CCIF_RCHNUM_MASK;
+}
+
+static void
+mtk_wed_wo_set_isr(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_IRQ0_MASK, mask);
+}
+
+static void
+mtk_wed_wo_set_ack(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_ACK, mask);
+}
+
+static void
+mtk_wed_wo_set_isr_mask(struct mtk_wed_wo *wo, u32 mask, u32 val, bool set)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&wo->mmio.lock, flags);
+ wo->mmio.irq_mask &= ~mask;
+ wo->mmio.irq_mask |= val;
+ if (set)
+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+ spin_unlock_irqrestore(&wo->mmio.lock, flags);
+}
+
+static void
+mtk_wed_wo_irq_enable(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_wo_set_isr_mask(wo, 0, mask, false);
+ tasklet_schedule(&wo->mmio.irq_tasklet);
+}
+
+static void
+mtk_wed_wo_irq_disable(struct mtk_wed_wo *wo, u32 mask)
+{
+ mtk_wed_wo_set_isr_mask(wo, mask, 0, true);
+}
+
+static void
+mtk_wed_wo_kickout(struct mtk_wed_wo *wo)
+{
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_BUSY, 1 << MTK_WED_WO_TXCH_NUM);
+ mtk_wed_mmio_w32(wo, MTK_WED_WO_CCIF_TCHNUM, MTK_WED_WO_TXCH_NUM);
+}
+
+static void
+mtk_wed_wo_queue_kick(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ u32 val)
+{
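+ /* ensure descriptor writes are visible before updating the cpu index */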
+ wmb();
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, val);
+}
+
+static void *
+mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
+ bool flush)
+{
+ int buf_len = SKB_WITH_OVERHEAD(q->buf_size);
+ int index = (q->tail + 1) % q->n_desc;
+ struct mtk_wed_wo_queue_entry *entry;
+ struct mtk_wed_wo_queue_desc *desc;
+ void *buf;
+
+ if (!q->queued)
+ return NULL;
+
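+ /* when flushing, force the DMA done bit so the buffer is always reclaimed */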
+ if (flush)
+ q->desc[index].ctrl |= cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE);
+ else if (!(q->desc[index].ctrl & cpu_to_le32(MTK_WED_WO_CTL_DMA_DONE)))
+ return NULL;
+
+ q->tail = index;
+ q->queued--;
+
+ desc = &q->desc[index];
+ entry = &q->entry[index];
+ buf = entry->buf;
+ if (len)
+ *len = FIELD_GET(MTK_WED_WO_CTL_SD_LEN0,
+ le32_to_cpu(READ_ONCE(desc->ctrl)));
+ if (buf)
+ dma_unmap_single(wo->hw->dev, entry->addr, buf_len,
+ DMA_FROM_DEVICE);
+ entry->buf = NULL;
+
+ return buf;
+}
+
+static int
+mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ bool rx)
+{
+ enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ int n_buf = 0;
+
+ while (q->queued < q->n_desc) {
+ struct mtk_wed_wo_queue_entry *entry;
+ dma_addr_t addr;
+ void *buf;
+
+ buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
+ if (!buf)
+ break;
+
+ addr = dma_map_single(wo->hw->dev, buf, q->buf_size, dir);
+ if (unlikely(dma_mapping_error(wo->hw->dev, addr))) {
+ skb_free_frag(buf);
+ break;
+ }
+
+ q->head = (q->head + 1) % q->n_desc;
+ entry = &q->entry[q->head];
+ entry->addr = addr;
+ entry->len = q->buf_size;
+ q->entry[q->head].buf = buf;
+
+ if (rx) {
+ struct mtk_wed_wo_queue_desc *desc = &q->desc[q->head];
+ u32 ctrl = MTK_WED_WO_CTL_LAST_SEC0 |
+ FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0,
+ entry->len);
+
+ WRITE_ONCE(desc->buf0, cpu_to_le32(addr));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+ }
+ q->queued++;
+ n_buf++;
+ }
+
+ return n_buf;
+}
+
+static void
+mtk_wed_wo_rx_complete(struct mtk_wed_wo *wo)
+{
+ mtk_wed_wo_set_ack(wo, MTK_WED_WO_RXCH_INT_MASK);
+ mtk_wed_wo_irq_enable(wo, MTK_WED_WO_RXCH_INT_MASK);
+}
+
+static void
+mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ for (;;) {
+ struct mtk_wed_mcu_hdr *hdr;
+ struct sk_buff *skb;
+ void *data;
+ u32 len;
+
+ data = mtk_wed_wo_dequeue(wo, q, &len, false);
+ if (!data)
+ break;
+
+ skb = build_skb(data, q->buf_size);
+ if (!skb) {
+ skb_free_frag(data);
+ continue;
+ }
+
+ __skb_put(skb, len);
+ if (mtk_wed_mcu_check_msg(wo, skb)) {
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+ if (hdr->flag & cpu_to_le16(MTK_WED_WARP_CMD_FLAG_RSP))
+ mtk_wed_mcu_rx_event(wo, skb);
+ else
+ mtk_wed_mcu_rx_unsolicited_event(wo, skb);
+ }
+
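+ /* refill the ring and notify the WO MCU about the new rx buffers */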
+ if (mtk_wed_wo_queue_refill(wo, q, true)) {
+ u32 index = (q->head - 1) % q->n_desc;
+
+ mtk_wed_wo_queue_kick(wo, q, index);
+ }
+}
+
+static irqreturn_t
+mtk_wed_wo_irq_handler(int irq, void *data)
+{
+ struct mtk_wed_wo *wo = data;
+
+ mtk_wed_wo_set_isr(wo, 0);
+ tasklet_schedule(&wo->mmio.irq_tasklet);
+
+ return IRQ_HANDLED;
+}
+
+static void mtk_wed_wo_irq_tasklet(struct tasklet_struct *t)
+{
+ struct mtk_wed_wo *wo = from_tasklet(wo, t, mmio.irq_tasklet);
+ u32 intr, mask;
+
+ /* disable interrupts */
+ mtk_wed_wo_set_isr(wo, 0);
+
+ intr = mtk_wed_wo_get_isr(wo);
+ intr &= wo->mmio.irq_mask;
+ mask = intr & (MTK_WED_WO_RXCH_INT_MASK | MTK_WED_WO_EXCEPTION_INT_MASK);
+ mtk_wed_wo_irq_disable(wo, mask);
+
+ if (intr & MTK_WED_WO_RXCH_INT_MASK) {
+ mtk_wed_wo_rx_run_queue(wo, &wo->q_rx);
+ mtk_wed_wo_rx_complete(wo);
+ }
+}
+
+/* mtk wed wo hw queues */
+
+static int
+mtk_wed_wo_queue_alloc(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ int n_desc, int buf_size, int index,
+ struct mtk_wed_wo_queue_regs *regs)
+{
+ q->regs = *regs;
+ q->n_desc = n_desc;
+ q->buf_size = buf_size;
+
+ q->desc = dmam_alloc_coherent(wo->hw->dev, n_desc * sizeof(*q->desc),
+ &q->desc_dma, GFP_KERNEL);
+ if (!q->desc)
+ return -ENOMEM;
+
+ q->entry = devm_kzalloc(wo->hw->dev, n_desc * sizeof(*q->entry),
+ GFP_KERNEL);
+ if (!q->entry)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void
+mtk_wed_wo_queue_free(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+ dma_free_coherent(wo->hw->dev, q->n_desc * sizeof(*q->desc), q->desc,
+ q->desc_dma);
+}
+
+static void
+mtk_wed_wo_queue_tx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < q->n_desc; i++) {
+ struct mtk_wed_wo_queue_entry *entry = &q->entry[i];
+
+ if (!entry->buf)
+ continue;
+
+ dma_unmap_single(wo->hw->dev, entry->addr, entry->len,
+ DMA_TO_DEVICE);
+ skb_free_frag(entry->buf);
+ entry->buf = NULL;
+ }
+
+ if (!q->cache.va)
+ return;
+
+ page = virt_to_page(q->cache.va);
+ __page_frag_cache_drain(page, q->cache.pagecnt_bias);
+ memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_rx_clean(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ struct page *page;
+
+ for (;;) {
+ void *buf = mtk_wed_wo_dequeue(wo, q, NULL, true);
+
+ if (!buf)
+ break;
+
+ skb_free_frag(buf);
+ }
+
+ if (!q->cache.va)
+ return;
+
+ page = virt_to_page(q->cache.va);
+ __page_frag_cache_drain(page, q->cache.pagecnt_bias);
+ memset(&q->cache, 0, sizeof(q->cache));
+}
+
+static void
+mtk_wed_wo_queue_reset(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
+{
+ mtk_wed_mmio_w32(wo, q->regs.cpu_idx, 0);
+ mtk_wed_mmio_w32(wo, q->regs.desc_base, q->desc_dma);
+ mtk_wed_mmio_w32(wo, q->regs.ring_size, q->n_desc);
+}
+
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
+ struct sk_buff *skb)
+{
+ struct mtk_wed_wo_queue_entry *entry;
+ struct mtk_wed_wo_queue_desc *desc;
+ int ret = 0, index;
+ u32 ctrl;
+
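+ /* refresh the tail from the hardware dma index before checking for a free slot */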
+ q->tail = mtk_wed_mmio_r32(wo, q->regs.dma_idx);
+ index = (q->head + 1) % q->n_desc;
+ if (q->tail == index) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ entry = &q->entry[index];
+ if (skb->len > entry->len) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ desc = &q->desc[index];
+ q->head = index;
+
+ dma_sync_single_for_cpu(wo->hw->dev, entry->addr, skb->len,
+ DMA_TO_DEVICE);
+ memcpy(entry->buf, skb->data, skb->len);
+ dma_sync_single_for_device(wo->hw->dev, entry->addr, skb->len,
+ DMA_TO_DEVICE);
+
+ ctrl = FIELD_PREP(MTK_WED_WO_CTL_SD_LEN0, skb->len) |
+ MTK_WED_WO_CTL_LAST_SEC0 | MTK_WED_WO_CTL_DMA_DONE;
+ WRITE_ONCE(desc->buf0, cpu_to_le32(entry->addr));
+ WRITE_ONCE(desc->ctrl, cpu_to_le32(ctrl));
+
+ mtk_wed_wo_queue_kick(wo, q, q->head);
+ mtk_wed_wo_kickout(wo);
+out:
+ dev_kfree_skb(skb);
+
+ return ret;
+}
+
+static int
+mtk_wed_wo_exception_init(struct mtk_wed_wo *wo)
+{
+ return 0;
+}
+
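+/* Map the CCIF register block referenced by the "mediatek,wo-ccif"
+ * phandle, request its interrupt and set up the TX/RX command rings.
+ * The CCIF dummy/shadow registers are used as mailboxes for the ring
+ * base, size and index values.
+ */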
+static int
+mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
+{
+ struct mtk_wed_wo_queue_regs regs;
+ struct device_node *np;
+ int ret;
+
+ np = of_parse_phandle(wo->hw->node, "mediatek,wo-ccif", 0);
+ if (!np)
+ return -ENODEV;
+
+ wo->mmio.regs = syscon_regmap_lookup_by_phandle(np, NULL);
+ if (IS_ERR(wo->mmio.regs)) {
+ ret = PTR_ERR(wo->mmio.regs);
+ goto error_put;
+ }
+
+ wo->mmio.irq = irq_of_parse_and_map(np, 0);
+ wo->mmio.irq_mask = MTK_WED_WO_ALL_INT_MASK;
+ spin_lock_init(&wo->mmio.lock);
+ tasklet_setup(&wo->mmio.irq_tasklet, mtk_wed_wo_irq_tasklet);
+
+ ret = devm_request_irq(wo->hw->dev, wo->mmio.irq,
+ mtk_wed_wo_irq_handler, IRQF_TRIGGER_HIGH,
+ KBUILD_MODNAME, wo);
+ if (ret)
+ goto error;
+
+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY1;
+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY2;
+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW4;
+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY3;
+
+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_tx, MTK_WED_WO_RING_SIZE,
+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_TXCH_NUM,
+ &regs);
+ if (ret)
+ goto error;
+
+ mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
+ mtk_wed_wo_queue_reset(wo, &wo->q_tx);
+
+ regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
+ regs.ring_size = MTK_WED_WO_CCIF_DUMMY6;
+ regs.dma_idx = MTK_WED_WO_CCIF_SHADOW8;
+ regs.cpu_idx = MTK_WED_WO_CCIF_DUMMY7;
+
+ ret = mtk_wed_wo_queue_alloc(wo, &wo->q_rx, MTK_WED_WO_RING_SIZE,
+ MTK_WED_WO_CMD_LEN, MTK_WED_WO_RXCH_NUM,
+ &regs);
+ if (ret)
+ goto error;
+
+ mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
+ mtk_wed_wo_queue_reset(wo, &wo->q_rx);
+
+ /* unmask the RX and exception interrupts */
+ mtk_wed_wo_set_isr(wo, wo->mmio.irq_mask);
+
+ return 0;
+
+error:
+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+error_put:
+ of_node_put(np);
+ return ret;
+}
+
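+/* Tear down the WO communication channel: mask and release the interrupt,
+ * then drain and free both command rings.
+ */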
+static void
+mtk_wed_wo_hw_deinit(struct mtk_wed_wo *wo)
+{
+ /* disable interrupts */
+ mtk_wed_wo_set_isr(wo, 0);
+
+ tasklet_disable(&wo->mmio.irq_tasklet);
+
+ disable_irq(wo->mmio.irq);
+ devm_free_irq(wo->hw->dev, wo->mmio.irq, wo);
+
+ mtk_wed_wo_queue_tx_clean(wo, &wo->q_tx);
+ mtk_wed_wo_queue_rx_clean(wo, &wo->q_rx);
+ mtk_wed_wo_queue_free(wo, &wo->q_tx);
+ mtk_wed_wo_queue_free(wo, &wo->q_rx);
+}
+
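+/* Allocate the WO context for a WED instance, bring up the CCIF command
+ * rings and initialize the MCU messaging layer.
+ */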
+int mtk_wed_wo_init(struct mtk_wed_hw *hw)
+{
+ struct mtk_wed_wo *wo;
+ int ret;
+
+ wo = devm_kzalloc(hw->dev, sizeof(*wo), GFP_KERNEL);
+ if (!wo)
+ return -ENOMEM;
+
+ hw->wed_wo = wo;
+ wo->hw = hw;
+
+ ret = mtk_wed_wo_hardware_init(wo);
+ if (ret)
+ return ret;
+
+ ret = mtk_wed_mcu_init(wo);
+ if (ret)
+ return ret;
+
+ return mtk_wed_wo_exception_init(wo);
+}
+
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw)
+{
+ struct mtk_wed_wo *wo = hw->wed_wo;
+
+ mtk_wed_wo_hw_deinit(wo);
+}
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.h b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
new file mode 100644
index 0000000000..7a1a2a28f1
--- /dev/null
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.h
@@ -0,0 +1,282 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2022 Lorenzo Bianconi <lorenzo@kernel.org> */
+
+#ifndef __MTK_WED_WO_H
+#define __MTK_WED_WO_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+
+struct mtk_wed_hw;
+
+struct mtk_wed_mcu_hdr {
+ /* DW0 */
+ u8 version;
+ u8 cmd;
+ __le16 length;
+
+ /* DW1 */
+ __le16 seq;
+ __le16 flag;
+
+ /* DW2 */
+ __le32 status;
+
+ /* DW3 */
+ u8 rsv[20];
+};
+
+struct mtk_wed_wo_log_info {
+ __le32 sn;
+ __le32 total;
+ __le32 rro;
+ __le32 mod;
+};
+
+enum mtk_wed_wo_event {
+ MTK_WED_WO_EVT_LOG_DUMP = 0x1,
+ MTK_WED_WO_EVT_PROFILING = 0x2,
+ MTK_WED_WO_EVT_RXCNT_INFO = 0x3,
+};
+
+#define MTK_WED_MODULE_ID_WO 1
+#define MTK_FW_DL_TIMEOUT 4000000 /* us */
+#define MTK_WOCPU_TIMEOUT 2000000 /* us */
+
+enum {
+ MTK_WED_WARP_CMD_FLAG_RSP = BIT(0),
+ MTK_WED_WARP_CMD_FLAG_NEED_RSP = BIT(1),
+ MTK_WED_WARP_CMD_FLAG_FROM_TO_WO = BIT(2),
+};
+
+#define MTK_WED_WO_CPU_MCUSYS_RESET_ADDR 0x15194050
+#define MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK 0x20
+#define MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK 0x1
+
+enum {
+ MTK_WED_WO_REGION_EMI,
+ MTK_WED_WO_REGION_ILM,
+ MTK_WED_WO_REGION_DATA,
+ MTK_WED_WO_REGION_BOOT,
+ __MTK_WED_WO_REGION_MAX,
+};
+
+enum mtk_wed_wo_state {
+ MTK_WED_WO_STATE_UNDEFINED,
+ MTK_WED_WO_STATE_INIT,
+ MTK_WED_WO_STATE_ENABLE,
+ MTK_WED_WO_STATE_DISABLE,
+ MTK_WED_WO_STATE_HALT,
+ MTK_WED_WO_STATE_GATING,
+ MTK_WED_WO_STATE_SER_RESET,
+ MTK_WED_WO_STATE_WF_RESET,
+};
+
+enum mtk_wed_wo_done_state {
+ MTK_WED_WOIF_UNDEFINED,
+ MTK_WED_WOIF_DISABLE_DONE,
+ MTK_WED_WOIF_TRIGGER_ENABLE,
+ MTK_WED_WOIF_ENABLE_DONE,
+ MTK_WED_WOIF_TRIGGER_GATING,
+ MTK_WED_WOIF_GATING_DONE,
+ MTK_WED_WOIF_TRIGGER_HALT,
+ MTK_WED_WOIF_HALT_DONE,
+};
+
+enum mtk_wed_dummy_cr_idx {
+ MTK_WED_DUMMY_CR_FWDL,
+ MTK_WED_DUMMY_CR_WO_STATUS,
+};
+
+#define MT7981_FIRMWARE_WO "mediatek/mt7981_wo.bin"
+#define MT7986_FIRMWARE_WO0 "mediatek/mt7986_wo_0.bin"
+#define MT7986_FIRMWARE_WO1 "mediatek/mt7986_wo_1.bin"
+
+#define MTK_WO_MCU_CFG_LS_BASE 0
+#define MTK_WO_MCU_CFG_LS_HW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x000)
+#define MTK_WO_MCU_CFG_LS_FW_VER_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x004)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG1_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x00c)
+#define MTK_WO_MCU_CFG_LS_CFG_DBG2_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x010)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x014)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_SET_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x018)
+#define MTK_WO_MCU_CFG_LS_WF_MCCR_CLR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x01c)
+#define MTK_WO_MCU_CFG_LS_WF_MCU_CFG_WM_WA_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x050)
+#define MTK_WO_MCU_CFG_LS_WM_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x060)
+#define MTK_WO_MCU_CFG_LS_WA_BOOT_ADDR_ADDR (MTK_WO_MCU_CFG_LS_BASE + 0x064)
+
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WM_CPU_RSTB_MASK BIT(5)
+#define MTK_WO_MCU_CFG_LS_WF_WM_WA_WA_CPU_RSTB_MASK BIT(0)
+
+#define MTK_WED_WO_RING_SIZE 256
+#define MTK_WED_WO_CMD_LEN 1504
+
+#define MTK_WED_WO_TXCH_NUM 0
+#define MTK_WED_WO_RXCH_NUM 1
+#define MTK_WED_WO_RXCH_WO_EXCEPTION 7
+
+#define MTK_WED_WO_TXCH_INT_MASK BIT(0)
+#define MTK_WED_WO_RXCH_INT_MASK BIT(1)
+#define MTK_WED_WO_EXCEPTION_INT_MASK BIT(7)
+#define MTK_WED_WO_ALL_INT_MASK (MTK_WED_WO_RXCH_INT_MASK | \
+ MTK_WED_WO_EXCEPTION_INT_MASK)
+
+#define MTK_WED_WO_CCIF_BUSY 0x004
+#define MTK_WED_WO_CCIF_START 0x008
+#define MTK_WED_WO_CCIF_TCHNUM 0x00c
+#define MTK_WED_WO_CCIF_RCHNUM 0x010
+#define MTK_WED_WO_CCIF_RCHNUM_MASK GENMASK(7, 0)
+
+#define MTK_WED_WO_CCIF_ACK 0x014
+#define MTK_WED_WO_CCIF_IRQ0_MASK 0x018
+#define MTK_WED_WO_CCIF_IRQ1_MASK 0x01c
+#define MTK_WED_WO_CCIF_DUMMY1 0x020
+#define MTK_WED_WO_CCIF_DUMMY2 0x024
+#define MTK_WED_WO_CCIF_DUMMY3 0x028
+#define MTK_WED_WO_CCIF_DUMMY4 0x02c
+#define MTK_WED_WO_CCIF_SHADOW1 0x030
+#define MTK_WED_WO_CCIF_SHADOW2 0x034
+#define MTK_WED_WO_CCIF_SHADOW3 0x038
+#define MTK_WED_WO_CCIF_SHADOW4 0x03c
+#define MTK_WED_WO_CCIF_DUMMY5 0x050
+#define MTK_WED_WO_CCIF_DUMMY6 0x054
+#define MTK_WED_WO_CCIF_DUMMY7 0x058
+#define MTK_WED_WO_CCIF_DUMMY8 0x05c
+#define MTK_WED_WO_CCIF_SHADOW5 0x060
+#define MTK_WED_WO_CCIF_SHADOW6 0x064
+#define MTK_WED_WO_CCIF_SHADOW7 0x068
+#define MTK_WED_WO_CCIF_SHADOW8 0x06c
+
+#define MTK_WED_WO_CTL_SD_LEN1 GENMASK(13, 0)
+#define MTK_WED_WO_CTL_LAST_SEC1 BIT(14)
+#define MTK_WED_WO_CTL_BURST BIT(15)
+#define MTK_WED_WO_CTL_SD_LEN0_SHIFT 16
+#define MTK_WED_WO_CTL_SD_LEN0 GENMASK(29, 16)
+#define MTK_WED_WO_CTL_LAST_SEC0 BIT(30)
+#define MTK_WED_WO_CTL_DMA_DONE BIT(31)
+#define MTK_WED_WO_INFO_WINFO GENMASK(15, 0)
+
+struct mtk_wed_wo_memory_region {
+ const char *name;
+ void __iomem *addr;
+ phys_addr_t phy_addr;
+ u32 size;
+ bool shared:1;
+ bool consumed:1;
+};
+
+struct mtk_wed_fw_region {
+ __le32 decomp_crc;
+ __le32 decomp_len;
+ __le32 decomp_blk_sz;
+ u8 rsv0[4];
+ __le32 addr;
+ __le32 len;
+ u8 feature_set;
+ u8 rsv1[15];
+} __packed;
+
+struct mtk_wed_fw_trailer {
+ u8 chip_id;
+ u8 eco_code;
+ u8 num_region;
+ u8 format_ver;
+ u8 format_flag;
+ u8 rsv[2];
+ char fw_ver[10];
+ char build_date[15];
+ u32 crc;
+};
+
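+/* Per-queue CCIF register offsets used as mailboxes for the descriptor
+ * base address, ring size, CPU index and DMA index.
+ */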
+struct mtk_wed_wo_queue_regs {
+ u32 desc_base;
+ u32 ring_size;
+ u32 cpu_idx;
+ u32 dma_idx;
+};
+
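+/* DMA descriptor layout shared with the WO MCU, 32 bytes per entry. */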
+struct mtk_wed_wo_queue_desc {
+ __le32 buf0;
+ __le32 ctrl;
+ __le32 buf1;
+ __le32 info;
+ __le32 reserved[4];
+} __packed __aligned(32);
+
+struct mtk_wed_wo_queue_entry {
+ dma_addr_t addr;
+ void *buf;
+ u32 len;
+};
+
+struct mtk_wed_wo_queue {
+ struct mtk_wed_wo_queue_regs regs;
+
+ struct page_frag_cache cache;
+
+ struct mtk_wed_wo_queue_desc *desc;
+ dma_addr_t desc_dma;
+
+ struct mtk_wed_wo_queue_entry *entry;
+
+ u16 head;
+ u16 tail;
+ int n_desc;
+ int queued;
+ int buf_size;
+};
+
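+/* Per-WED-instance WO context: firmware boot region, TX/RX command
+ * queues, MCU messaging state and CCIF MMIO/interrupt state.
+ */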
+struct mtk_wed_wo {
+ struct mtk_wed_hw *hw;
+ struct mtk_wed_wo_memory_region boot;
+
+ struct mtk_wed_wo_queue q_tx;
+ struct mtk_wed_wo_queue q_rx;
+
+ struct {
+ struct mutex mutex;
+ int timeout;
+ u16 seq;
+
+ struct sk_buff_head res_q;
+ wait_queue_head_t wait;
+ } mcu;
+
+ struct {
+ struct regmap *regs;
+
+ spinlock_t lock;
+ struct tasklet_struct irq_tasklet;
+ int irq;
+ u32 irq_mask;
+ } mmio;
+};
+
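+/* Sanity check a message received from the WO MCU: the header version
+ * must be zero and the skb length must match the length advertised in
+ * the header.
+ */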
+static inline int
+mtk_wed_mcu_check_msg(struct mtk_wed_wo *wo, struct sk_buff *skb)
+{
+ struct mtk_wed_mcu_hdr *hdr = (struct mtk_wed_mcu_hdr *)skb->data;
+
+ if (hdr->version)
+ return -EINVAL;
+
+ if (skb->len < sizeof(*hdr) || skb->len != le16_to_cpu(hdr->length))
+ return -EINVAL;
+
+ return 0;
+}
+
+void mtk_wed_mcu_rx_event(struct mtk_wed_wo *wo, struct sk_buff *skb);
+void mtk_wed_mcu_rx_unsolicited_event(struct mtk_wed_wo *wo,
+ struct sk_buff *skb);
+int mtk_wed_mcu_send_msg(struct mtk_wed_wo *wo, int id, int cmd,
+ const void *data, int len, bool wait_resp);
+int mtk_wed_mcu_msg_update(struct mtk_wed_device *dev, int id, void *data,
+ int len);
+int mtk_wed_mcu_init(struct mtk_wed_wo *wo);
+int mtk_wed_wo_init(struct mtk_wed_hw *hw);
+void mtk_wed_wo_deinit(struct mtk_wed_hw *hw);
+int mtk_wed_wo_queue_tx_skb(struct mtk_wed_wo *dev, struct mtk_wed_wo_queue *q,
+ struct sk_buff *skb);
+
+#endif /* __MTK_WED_WO_H */