Diffstat (limited to 'drivers/net/ethernet/mediatek')
-rw-r--r--  drivers/net/ethernet/mediatek/Kconfig            |   34
-rw-r--r--  drivers/net/ethernet/mediatek/Makefile           |   13
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_path.c     |  277
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c      | 4390
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.h      | 1246
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.c          |  911
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe.h          |  358
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c  |  189
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_offload.c  |  607
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_ppe_regs.h     |  152
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_sgmii.c        |  159
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_star_emac.c    | 1761
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.c          | 1160
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed.h          |  139
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_debugfs.c  |  178
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_ops.c      |    8
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_wed_regs.h     |  332
17 files changed, 11914 insertions, 0 deletions
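
As a reading aid (not part of the change itself): the Kconfig hunk below introduces the NET_VENDOR_MEDIATEK, NET_MEDIATEK_SOC, NET_MEDIATEK_SOC_WED and NET_MEDIATEK_STAR_EMAC symbols, and the Makefile hunk builds them into the mtk_eth and mtk_star_emac objects. A minimal configuration sketch that selects these drivers could look like the fragment below; the symbol and module names come from the hunks that follow, while the y/m choices and the ARCH_MEDIATEK target are assumptions made only for illustration.

    # Illustrative .config fragment (assumes an ARCH_MEDIATEK target)
    CONFIG_NET_VENDOR_MEDIATEK=y
    # core GMAC/QDMA driver, built as a module (mtk_eth.ko)
    CONFIG_NET_MEDIATEK_SOC=m
    # Wireless Ethernet Dispatch: def_bool, resolves to y once NET_MEDIATEK_SOC != n
    CONFIG_NET_MEDIATEK_SOC_WED=y
    # standalone STAR EMAC driver (mtk_star_emac.ko)
    CONFIG_NET_MEDIATEK_STAR_EMAC=m
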
diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig new file mode 100644 index 000000000..97374fb3e --- /dev/null +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-only +config NET_VENDOR_MEDIATEK + bool "MediaTek devices" + depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST + help + If you have a Mediatek SoC with ethernet, say Y. + +if NET_VENDOR_MEDIATEK + +config NET_MEDIATEK_SOC_WED + depends on ARCH_MEDIATEK || COMPILE_TEST + def_bool NET_MEDIATEK_SOC != n + +config NET_MEDIATEK_SOC + tristate "MediaTek SoC Gigabit Ethernet support" + depends on NET_DSA || !NET_DSA + select PINCTRL + select PHYLINK + select DIMLIB + select PAGE_POOL + select PAGE_POOL_STATS + help + This driver supports the gigabit ethernet MACs in the + MediaTek SoC family. + +config NET_MEDIATEK_STAR_EMAC + tristate "MediaTek STAR Ethernet MAC support" + select PHYLIB + select REGMAP_MMIO + help + This driver supports the ethernet MAC IP first used on + MediaTek MT85** SoCs. + +endif #NET_VENDOR_MEDIATEK diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile new file mode 100644 index 000000000..45ba09705 --- /dev/null +++ b/drivers/net/ethernet/mediatek/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Mediatek SoCs built-in ethernet macs +# + +obj-$(CONFIG_NET_MEDIATEK_SOC) += mtk_eth.o +mtk_eth-y := mtk_eth_soc.o mtk_sgmii.o mtk_eth_path.o mtk_ppe.o mtk_ppe_debugfs.o mtk_ppe_offload.o +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed.o +ifdef CONFIG_DEBUG_FS +mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o +endif +obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o +obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o diff --git a/drivers/net/ethernet/mediatek/mtk_eth_path.c b/drivers/net/ethernet/mediatek/mtk_eth_path.c new file mode 100644 index 000000000..72648535a --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_path.c @@ -0,0 +1,277 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-2019 MediaTek Inc. 
+ +/* A library for configuring path from GMAC/GDM to target PHY + * + * Author: Sean Wang <sean.wang@mediatek.com> + * + */ + +#include <linux/phy.h> +#include <linux/regmap.h> + +#include "mtk_eth_soc.h" + +struct mtk_eth_muxc { + const char *name; + int cap_bit; + int (*set_path)(struct mtk_eth *eth, int path); +}; + +static const char *mtk_eth_path_name(int path) +{ + switch (path) { + case MTK_ETH_PATH_GMAC1_RGMII: + return "gmac1_rgmii"; + case MTK_ETH_PATH_GMAC1_TRGMII: + return "gmac1_trgmii"; + case MTK_ETH_PATH_GMAC1_SGMII: + return "gmac1_sgmii"; + case MTK_ETH_PATH_GMAC2_RGMII: + return "gmac2_rgmii"; + case MTK_ETH_PATH_GMAC2_SGMII: + return "gmac2_sgmii"; + case MTK_ETH_PATH_GMAC2_GEPHY: + return "gmac2_gephy"; + case MTK_ETH_PATH_GDM1_ESW: + return "gdm1_esw"; + default: + return "unknown path"; + } +} + +static int set_mux_gdm1_to_gmac1_esw(struct mtk_eth *eth, int path) +{ + bool updated = true; + u32 val, mask, set; + + switch (path) { + case MTK_ETH_PATH_GMAC1_SGMII: + mask = ~(u32)MTK_MUX_TO_ESW; + set = 0; + break; + case MTK_ETH_PATH_GDM1_ESW: + mask = ~(u32)MTK_MUX_TO_ESW; + set = MTK_MUX_TO_ESW; + break; + default: + updated = false; + break; + } + + if (updated) { + val = mtk_r32(eth, MTK_MAC_MISC); + val = (val & mask) | set; + mtk_w32(eth, val, MTK_MAC_MISC); + } + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_gmac2_gmac0_to_gephy(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + switch (path) { + case MTK_ETH_PATH_GMAC2_GEPHY: + val = ~(u32)GEPHY_MAC_SEL; + break; + default: + updated = false; + break; + } + + if (updated) + regmap_update_bits(eth->infra, INFRA_MISC2, GEPHY_MAC_SEL, val); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_u3_gmac2_to_qphy(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + switch (path) { + case MTK_ETH_PATH_GMAC2_SGMII: + val = CO_QPHY_SEL; + break; + default: + updated = false; + break; + } + + if (updated) + regmap_update_bits(eth->infra, INFRA_MISC2, CO_QPHY_SEL, val); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_gmac1_gmac2_to_sgmii_rgmii(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + switch (path) { + case MTK_ETH_PATH_GMAC1_SGMII: + val = SYSCFG0_SGMII_GMAC1; + break; + case MTK_ETH_PATH_GMAC2_SGMII: + val = SYSCFG0_SGMII_GMAC2; + break; + case MTK_ETH_PATH_GMAC1_RGMII: + case MTK_ETH_PATH_GMAC2_RGMII: + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= SYSCFG0_SGMII_MASK; + + if ((path == MTK_GMAC1_RGMII && val == SYSCFG0_SGMII_GMAC1) || + (path == MTK_GMAC2_RGMII && val == SYSCFG0_SGMII_GMAC2)) + val = 0; + else + updated = false; + break; + default: + updated = false; + break; + } + + if (updated) + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static int set_mux_gmac12_to_gephy_sgmii(struct mtk_eth *eth, int path) +{ + unsigned int val = 0; + bool updated = true; + + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + + switch (path) { + case MTK_ETH_PATH_GMAC1_SGMII: + val |= SYSCFG0_SGMII_GMAC1_V2; + break; + case MTK_ETH_PATH_GMAC2_GEPHY: + val &= ~(u32)SYSCFG0_SGMII_GMAC2_V2; + break; + 
case MTK_ETH_PATH_GMAC2_SGMII: + val |= SYSCFG0_SGMII_GMAC2_V2; + break; + default: + updated = false; + } + + if (updated) + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, val); + + dev_dbg(eth->dev, "path %s in %s updated = %d\n", + mtk_eth_path_name(path), __func__, updated); + + return 0; +} + +static const struct mtk_eth_muxc mtk_eth_muxc[] = { + { + .name = "mux_gdm1_to_gmac1_esw", + .cap_bit = MTK_ETH_MUX_GDM1_TO_GMAC1_ESW, + .set_path = set_mux_gdm1_to_gmac1_esw, + }, { + .name = "mux_gmac2_gmac0_to_gephy", + .cap_bit = MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY, + .set_path = set_mux_gmac2_gmac0_to_gephy, + }, { + .name = "mux_u3_gmac2_to_qphy", + .cap_bit = MTK_ETH_MUX_U3_GMAC2_TO_QPHY, + .set_path = set_mux_u3_gmac2_to_qphy, + }, { + .name = "mux_gmac1_gmac2_to_sgmii_rgmii", + .cap_bit = MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII, + .set_path = set_mux_gmac1_gmac2_to_sgmii_rgmii, + }, { + .name = "mux_gmac12_to_gephy_sgmii", + .cap_bit = MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII, + .set_path = set_mux_gmac12_to_gephy_sgmii, + }, +}; + +static int mtk_eth_mux_setup(struct mtk_eth *eth, int path) +{ + int i, err = 0; + + if (!MTK_HAS_CAPS(eth->soc->caps, path)) { + dev_err(eth->dev, "path %s isn't support on the SoC\n", + mtk_eth_path_name(path)); + return -EINVAL; + } + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_MUX)) + return 0; + + /* Setup MUX in path fabric */ + for (i = 0; i < ARRAY_SIZE(mtk_eth_muxc); i++) { + if (MTK_HAS_CAPS(eth->soc->caps, mtk_eth_muxc[i].cap_bit)) { + err = mtk_eth_muxc[i].set_path(eth, path); + if (err) + goto out; + } else { + dev_dbg(eth->dev, "mux %s isn't present on the SoC\n", + mtk_eth_muxc[i].name); + } + } + +out: + return err; +} + +int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id) +{ + int path; + + path = (mac_id == 0) ? MTK_ETH_PATH_GMAC1_SGMII : + MTK_ETH_PATH_GMAC2_SGMII; + + /* Setup proper MUXes along the path */ + return mtk_eth_mux_setup(eth, path); +} + +int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id) +{ + int path = 0; + + if (mac_id == 1) + path = MTK_ETH_PATH_GMAC2_GEPHY; + + if (!path) + return -EINVAL; + + /* Setup proper MUXes along the path */ + return mtk_eth_mux_setup(eth, path); +} + +int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id) +{ + int path; + + path = (mac_id == 0) ? 
MTK_ETH_PATH_GMAC1_RGMII : + MTK_ETH_PATH_GMAC2_RGMII; + + /* Setup proper MUXes along the path */ + return mtk_eth_mux_setup(eth, path); +} + diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c new file mode 100644 index 000000000..17e6ac444 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -0,0 +1,4390 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> + */ + +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/of_address.h> +#include <linux/mfd/syscon.h> +#include <linux/regmap.h> +#include <linux/clk.h> +#include <linux/pm_runtime.h> +#include <linux/if_vlan.h> +#include <linux/reset.h> +#include <linux/tcp.h> +#include <linux/interrupt.h> +#include <linux/pinctrl/devinfo.h> +#include <linux/phylink.h> +#include <linux/jhash.h> +#include <linux/bitfield.h> +#include <net/dsa.h> + +#include "mtk_eth_soc.h" +#include "mtk_wed.h" + +static int mtk_msg_level = -1; +module_param_named(msg_level, mtk_msg_level, int, 0); +MODULE_PARM_DESC(msg_level, "Message level (-1=defaults,0=none,...,16=all)"); + +#define MTK_ETHTOOL_STAT(x) { #x, \ + offsetof(struct mtk_hw_stats, x) / sizeof(u64) } + +#define MTK_ETHTOOL_XDP_STAT(x) { #x, \ + offsetof(struct mtk_hw_stats, xdp_stats.x) / \ + sizeof(u64) } + +static const struct mtk_reg_map mtk_reg_map = { + .tx_irq_mask = 0x1a1c, + .tx_irq_status = 0x1a18, + .pdma = { + .rx_ptr = 0x0900, + .rx_cnt_cfg = 0x0904, + .pcrx_ptr = 0x0908, + .glo_cfg = 0x0a04, + .rst_idx = 0x0a08, + .delay_irq = 0x0a0c, + .irq_status = 0x0a20, + .irq_mask = 0x0a28, + .int_grp = 0x0a50, + }, + .qdma = { + .qtx_cfg = 0x1800, + .rx_ptr = 0x1900, + .rx_cnt_cfg = 0x1904, + .qcrx_ptr = 0x1908, + .glo_cfg = 0x1a04, + .rst_idx = 0x1a08, + .delay_irq = 0x1a0c, + .fc_th = 0x1a10, + .int_grp = 0x1a20, + .hred = 0x1a44, + .ctx_ptr = 0x1b00, + .dtx_ptr = 0x1b04, + .crx_ptr = 0x1b10, + .drx_ptr = 0x1b14, + .fq_head = 0x1b20, + .fq_tail = 0x1b24, + .fq_count = 0x1b28, + .fq_blen = 0x1b2c, + }, + .gdm1_cnt = 0x2400, + .gdma_to_ppe = 0x4444, + .ppe_base = 0x0c00, + .wdma_base = { + [0] = 0x2800, + [1] = 0x2c00, + }, +}; + +static const struct mtk_reg_map mt7628_reg_map = { + .tx_irq_mask = 0x0a28, + .tx_irq_status = 0x0a20, + .pdma = { + .rx_ptr = 0x0900, + .rx_cnt_cfg = 0x0904, + .pcrx_ptr = 0x0908, + .glo_cfg = 0x0a04, + .rst_idx = 0x0a08, + .delay_irq = 0x0a0c, + .irq_status = 0x0a20, + .irq_mask = 0x0a28, + .int_grp = 0x0a50, + }, +}; + +static const struct mtk_reg_map mt7986_reg_map = { + .tx_irq_mask = 0x461c, + .tx_irq_status = 0x4618, + .pdma = { + .rx_ptr = 0x6100, + .rx_cnt_cfg = 0x6104, + .pcrx_ptr = 0x6108, + .glo_cfg = 0x6204, + .rst_idx = 0x6208, + .delay_irq = 0x620c, + .irq_status = 0x6220, + .irq_mask = 0x6228, + .int_grp = 0x6250, + }, + .qdma = { + .qtx_cfg = 0x4400, + .rx_ptr = 0x4500, + .rx_cnt_cfg = 0x4504, + .qcrx_ptr = 0x4508, + .glo_cfg = 0x4604, + .rst_idx = 0x4608, + .delay_irq = 0x460c, + .fc_th = 0x4610, + .int_grp = 0x4620, + .hred = 0x4644, + .ctx_ptr = 0x4700, + .dtx_ptr = 0x4704, + .crx_ptr = 0x4710, + .drx_ptr = 0x4714, + .fq_head = 0x4720, + .fq_tail = 0x4724, + .fq_count = 0x4728, + .fq_blen = 0x472c, + }, + .gdm1_cnt = 0x1c00, + .gdma_to_ppe = 0x3333, + .ppe_base = 0x2000, + .wdma_base = { + [0] = 0x4800, + [1] = 0x4c00, + }, +}; + +/* strings used by ethtool */ 
+static const struct mtk_ethtool_stats { + char str[ETH_GSTRING_LEN]; + u32 offset; +} mtk_ethtool_stats[] = { + MTK_ETHTOOL_STAT(tx_bytes), + MTK_ETHTOOL_STAT(tx_packets), + MTK_ETHTOOL_STAT(tx_skip), + MTK_ETHTOOL_STAT(tx_collisions), + MTK_ETHTOOL_STAT(rx_bytes), + MTK_ETHTOOL_STAT(rx_packets), + MTK_ETHTOOL_STAT(rx_overflow), + MTK_ETHTOOL_STAT(rx_fcs_errors), + MTK_ETHTOOL_STAT(rx_short_errors), + MTK_ETHTOOL_STAT(rx_long_errors), + MTK_ETHTOOL_STAT(rx_checksum_errors), + MTK_ETHTOOL_STAT(rx_flow_control_packets), + MTK_ETHTOOL_XDP_STAT(rx_xdp_redirect), + MTK_ETHTOOL_XDP_STAT(rx_xdp_pass), + MTK_ETHTOOL_XDP_STAT(rx_xdp_drop), + MTK_ETHTOOL_XDP_STAT(rx_xdp_tx), + MTK_ETHTOOL_XDP_STAT(rx_xdp_tx_errors), + MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit), + MTK_ETHTOOL_XDP_STAT(tx_xdp_xmit_errors), +}; + +static const char * const mtk_clks_source_name[] = { + "ethif", "sgmiitop", "esw", "gp0", "gp1", "gp2", "fe", "trgpll", + "sgmii_tx250m", "sgmii_rx250m", "sgmii_cdr_ref", "sgmii_cdr_fb", + "sgmii2_tx250m", "sgmii2_rx250m", "sgmii2_cdr_ref", "sgmii2_cdr_fb", + "sgmii_ck", "eth2pll", "wocpu0", "wocpu1", "netsys0", "netsys1" +}; + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg) +{ + __raw_writel(val, eth->base + reg); +} + +u32 mtk_r32(struct mtk_eth *eth, unsigned reg) +{ + return __raw_readl(eth->base + reg); +} + +static u32 mtk_m32(struct mtk_eth *eth, u32 mask, u32 set, unsigned reg) +{ + u32 val; + + val = mtk_r32(eth, reg); + val &= ~mask; + val |= set; + mtk_w32(eth, val, reg); + return reg; +} + +static int mtk_mdio_busy_wait(struct mtk_eth *eth) +{ + unsigned long t_start = jiffies; + + while (1) { + if (!(mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_ACCESS)) + return 0; + if (time_after(jiffies, t_start + PHY_IAC_TIMEOUT)) + break; + cond_resched(); + } + + dev_err(eth->dev, "mdio: MDIO timeout\n"); + return -ETIMEDOUT; +} + +static int _mtk_mdio_write(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg, + u32 write_data) +{ + int ret; + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + if (phy_reg & MII_ADDR_C45) { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_ADDR | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_WRITE | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(write_data), + MTK_PHY_IAC); + } else { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C22 | + PHY_IAC_CMD_WRITE | + PHY_IAC_REG(phy_reg) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(write_data), + MTK_PHY_IAC); + } + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + return 0; +} + +static int _mtk_mdio_read(struct mtk_eth *eth, u32 phy_addr, u32 phy_reg) +{ + int ret; + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + if (phy_reg & MII_ADDR_C45) { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_ADDR | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr) | + PHY_IAC_DATA(mdiobus_c45_regad(phy_reg)), + MTK_PHY_IAC); + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C45 | + PHY_IAC_CMD_C45_READ | + PHY_IAC_REG(mdiobus_c45_devad(phy_reg)) | + PHY_IAC_ADDR(phy_addr), + MTK_PHY_IAC); + } else { + mtk_w32(eth, PHY_IAC_ACCESS | + PHY_IAC_START_C22 | + PHY_IAC_CMD_C22_READ | + PHY_IAC_REG(phy_reg) | + 
PHY_IAC_ADDR(phy_addr), + MTK_PHY_IAC); + } + + ret = mtk_mdio_busy_wait(eth); + if (ret < 0) + return ret; + + return mtk_r32(eth, MTK_PHY_IAC) & PHY_IAC_DATA_MASK; +} + +static int mtk_mdio_write(struct mii_bus *bus, int phy_addr, + int phy_reg, u16 val) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_write(eth, phy_addr, phy_reg, val); +} + +static int mtk_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg) +{ + struct mtk_eth *eth = bus->priv; + + return _mtk_mdio_read(eth, phy_addr, phy_reg); +} + +static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface) +{ + u32 val; + + /* Check DDR memory type. + * Currently TRGMII mode with DDR2 memory is not supported. + */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG, &val); + if (interface == PHY_INTERFACE_MODE_TRGMII && + val & SYSCFG_DRAM_TYPE_DDR2) { + dev_err(eth->dev, + "TRGMII mode with DDR2 memory is not supported!\n"); + return -EOPNOTSUPP; + } + + val = (interface == PHY_INTERFACE_MODE_TRGMII) ? + ETHSYS_TRGMII_MT7621_DDR_PLL : 0; + + regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, + ETHSYS_TRGMII_MT7621_MASK, val); + + return 0; +} + +static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, + phy_interface_t interface, int speed) +{ + u32 val; + int ret; + + if (interface == PHY_INTERFACE_MODE_TRGMII) { + mtk_w32(eth, TRGMII_MODE, INTF_MODE); + val = 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + return; + } + + val = (speed == SPEED_1000) ? + INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100; + mtk_w32(eth, val, INTF_MODE); + + regmap_update_bits(eth->ethsys, ETHSYS_CLKCFG0, + ETHSYS_TRGMII_CLK_SEL362_5, + ETHSYS_TRGMII_CLK_SEL362_5); + + val = (speed == SPEED_1000) ? 250000000 : 500000000; + ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val); + if (ret) + dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret); + + val = (speed == SPEED_1000) ? + RCK_CTRL_RGMII_1000 : RCK_CTRL_RGMII_10_100; + mtk_w32(eth, val, TRGMII_RCK_CTRL); + + val = (speed == SPEED_1000) ? + TCK_CTRL_RGMII_1000 : TCK_CTRL_RGMII_10_100; + mtk_w32(eth, val, TRGMII_TCK_CTRL); +} + +static struct phylink_pcs *mtk_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + unsigned int sid; + + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(interface)) { + sid = (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_SGMII)) ? 
+ 0 : mac->id; + + return mtk_sgmii_select_pcs(eth->sgmii, sid); + } + + return NULL; +} + +static void mtk_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + int val, ge_mode, err = 0; + u32 i; + + /* MT76x8 has no hardware settings between for the MAC */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + mac->interface != state->interface) { + /* Setup soc pin functions */ + switch (state->interface) { + case PHY_INTERFACE_MODE_TRGMII: + if (mac->id) + goto err_phy; + if (!MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_GMAC1_TRGMII)) + goto err_phy; + fallthrough; + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_REVMII: + case PHY_INTERFACE_MODE_RMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RGMII)) { + err = mtk_gmac_rgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_SGMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + err = mtk_gmac_sgmii_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + case PHY_INTERFACE_MODE_GMII: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_GEPHY)) { + err = mtk_gmac_gephy_path_setup(eth, mac->id); + if (err) + goto init_err; + } + break; + default: + goto err_phy; + } + + /* Setup clock for 1st gmac */ + if (!mac->id && state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_8023z(state->interface) && + MTK_HAS_CAPS(mac->hw->soc->caps, MTK_GMAC1_TRGMII)) { + if (MTK_HAS_CAPS(mac->hw->soc->caps, + MTK_TRGMII_MT7621_CLK)) { + if (mt7621_gmac0_rgmii_adjust(mac->hw, + state->interface)) + goto err_phy; + } else { + /* FIXME: this is incorrect. Not only does it + * use state->speed (which is not guaranteed + * to be correct) but it also makes use of it + * in a code path that will only be reachable + * when the PHY interface mode changes, not + * when the speed changes. Consequently, RGMII + * is probably broken. + */ + mtk_gmac0_rgmii_adjust(mac->hw, + state->interface, + state->speed); + + /* mt7623_pad_clk_setup */ + for (i = 0 ; i < NUM_TRGMII_CTRL; i++) + mtk_w32(mac->hw, + TD_DM_DRVP(8) | TD_DM_DRVN(8), + TRGMII_TD_ODT(i)); + + /* Assert/release MT7623 RXC reset */ + mtk_m32(mac->hw, 0, RXC_RST | RXC_DQSISEL, + TRGMII_RCK_CTRL); + mtk_m32(mac->hw, RXC_RST, 0, TRGMII_RCK_CTRL); + } + } + + ge_mode = 0; + switch (state->interface) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + ge_mode = 1; + break; + case PHY_INTERFACE_MODE_REVMII: + ge_mode = 2; + break; + case PHY_INTERFACE_MODE_RMII: + if (mac->id) + goto err_phy; + ge_mode = 3; + break; + default: + break; + } + + /* put the gmac into the right mode */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + val &= ~SYSCFG0_GE_MODE(SYSCFG0_GE_MASK, mac->id); + val |= SYSCFG0_GE_MODE(ge_mode, mac->id); + regmap_write(eth->ethsys, ETHSYS_SYSCFG0, val); + + mac->interface = state->interface; + } + + /* SGMII */ + if (state->interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(state->interface)) { + /* The path GMAC to SGMII will be enabled once the SGMIISYS is + * being setup done. 
+ */ + regmap_read(eth->ethsys, ETHSYS_SYSCFG0, &val); + + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, + ~(u32)SYSCFG0_SGMII_MASK); + + /* Save the syscfg0 value for mac_finish */ + mac->syscfg0 = val; + } else if (phylink_autoneg_inband(mode)) { + dev_err(eth->dev, + "In-band mode not supported in non SGMII mode!\n"); + return; + } + + return; + +err_phy: + dev_err(eth->dev, "%s: GMAC%d mode %s not supported!\n", __func__, + mac->id, phy_modes(state->interface)); + return; + +init_err: + dev_err(eth->dev, "%s: GMAC%d mode %s err: %d!\n", __func__, + mac->id, phy_modes(state->interface), err); +} + +static int mtk_mac_finish(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + struct mtk_eth *eth = mac->hw; + u32 mcr_cur, mcr_new; + + /* Enable SGMII */ + if (interface == PHY_INTERFACE_MODE_SGMII || + phy_interface_mode_is_8023z(interface)) + regmap_update_bits(eth->ethsys, ETHSYS_SYSCFG0, + SYSCFG0_SGMII_MASK, mac->syscfg0); + + /* Setup gmac */ + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur; + mcr_new |= MAC_MCR_IPG_CFG | MAC_MCR_FORCE_MODE | + MAC_MCR_BACKOFF_EN | MAC_MCR_BACKPR_EN | MAC_MCR_FORCE_LINK | + MAC_MCR_RX_FIFO_CLR_DIS; + + /* Only update control register when needed! */ + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); + + return 0; +} + +static void mtk_mac_pcs_get_state(struct phylink_config *config, + struct phylink_link_state *state) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 pmsr = mtk_r32(mac->hw, MTK_MAC_MSR(mac->id)); + + state->link = (pmsr & MAC_MSR_LINK); + state->duplex = (pmsr & MAC_MSR_DPX) >> 1; + + switch (pmsr & (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100)) { + case 0: + state->speed = SPEED_10; + break; + case MAC_MSR_SPEED_100: + state->speed = SPEED_100; + break; + case MAC_MSR_SPEED_1000: + state->speed = SPEED_1000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->pause &= (MLO_PAUSE_RX | MLO_PAUSE_TX); + if (pmsr & MAC_MSR_RX_FC) + state->pause |= MLO_PAUSE_RX; + if (pmsr & MAC_MSR_TX_FC) + state->pause |= MLO_PAUSE_TX; +} + +static void mtk_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + + mcr &= ~(MAC_MCR_TX_EN | MAC_MCR_RX_EN); + mtk_w32(mac->hw, mcr, MTK_MAC_MCR(mac->id)); +} + +static void mtk_mac_link_up(struct phylink_config *config, + struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct mtk_mac *mac = container_of(config, struct mtk_mac, + phylink_config); + u32 mcr; + + mcr = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr &= ~(MAC_MCR_SPEED_100 | MAC_MCR_SPEED_1000 | + MAC_MCR_FORCE_DPX | MAC_MCR_FORCE_TX_FC | + MAC_MCR_FORCE_RX_FC); + + /* Configure speed */ + switch (speed) { + case SPEED_2500: + case SPEED_1000: + mcr |= MAC_MCR_SPEED_1000; + break; + case SPEED_100: + mcr |= MAC_MCR_SPEED_100; + break; + } + + /* Configure duplex */ + if (duplex == DUPLEX_FULL) + mcr |= MAC_MCR_FORCE_DPX; + + /* Configure pause modes - phylink will avoid these for half duplex */ + if (tx_pause) + mcr |= MAC_MCR_FORCE_TX_FC; + if (rx_pause) + mcr |= MAC_MCR_FORCE_RX_FC; + + mcr |= MAC_MCR_TX_EN | MAC_MCR_RX_EN; + mtk_w32(mac->hw, mcr, 
MTK_MAC_MCR(mac->id)); +} + +static const struct phylink_mac_ops mtk_phylink_ops = { + .validate = phylink_generic_validate, + .mac_select_pcs = mtk_mac_select_pcs, + .mac_pcs_get_state = mtk_mac_pcs_get_state, + .mac_config = mtk_mac_config, + .mac_finish = mtk_mac_finish, + .mac_link_down = mtk_mac_link_down, + .mac_link_up = mtk_mac_link_up, +}; + +static int mtk_mdio_init(struct mtk_eth *eth) +{ + struct device_node *mii_np; + int ret; + + mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus"); + if (!mii_np) { + dev_err(eth->dev, "no %s child node found", "mdio-bus"); + return -ENODEV; + } + + if (!of_device_is_available(mii_np)) { + ret = -ENODEV; + goto err_put_node; + } + + eth->mii_bus = devm_mdiobus_alloc(eth->dev); + if (!eth->mii_bus) { + ret = -ENOMEM; + goto err_put_node; + } + + eth->mii_bus->name = "mdio"; + eth->mii_bus->read = mtk_mdio_read; + eth->mii_bus->write = mtk_mdio_write; + eth->mii_bus->probe_capabilities = MDIOBUS_C22_C45; + eth->mii_bus->priv = eth; + eth->mii_bus->parent = eth->dev; + + snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%pOFn", mii_np); + ret = of_mdiobus_register(eth->mii_bus, mii_np); + +err_put_node: + of_node_put(mii_np); + return ret; +} + +static void mtk_mdio_cleanup(struct mtk_eth *eth) +{ + if (!eth->mii_bus) + return; + + mdiobus_unregister(eth->mii_bus); +} + +static inline void mtk_tx_irq_disable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->tx_irq_lock, flags); + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); + mtk_w32(eth, val & ~mask, eth->soc->reg_map->tx_irq_mask); + spin_unlock_irqrestore(ð->tx_irq_lock, flags); +} + +static inline void mtk_tx_irq_enable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->tx_irq_lock, flags); + val = mtk_r32(eth, eth->soc->reg_map->tx_irq_mask); + mtk_w32(eth, val | mask, eth->soc->reg_map->tx_irq_mask); + spin_unlock_irqrestore(ð->tx_irq_lock, flags); +} + +static inline void mtk_rx_irq_disable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->rx_irq_lock, flags); + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); + mtk_w32(eth, val & ~mask, eth->soc->reg_map->pdma.irq_mask); + spin_unlock_irqrestore(ð->rx_irq_lock, flags); +} + +static inline void mtk_rx_irq_enable(struct mtk_eth *eth, u32 mask) +{ + unsigned long flags; + u32 val; + + spin_lock_irqsave(ð->rx_irq_lock, flags); + val = mtk_r32(eth, eth->soc->reg_map->pdma.irq_mask); + mtk_w32(eth, val | mask, eth->soc->reg_map->pdma.irq_mask); + spin_unlock_irqrestore(ð->rx_irq_lock, flags); +} + +static int mtk_set_mac_address(struct net_device *dev, void *p) +{ + int ret = eth_mac_addr(dev, p); + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + const char *macaddr = dev->dev_addr; + + if (ret) + return ret; + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + spin_lock_bh(&mac->hw->page_lock); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MT7628_SDM_MAC_ADRH); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MT7628_SDM_MAC_ADRL); + } else { + mtk_w32(mac->hw, (macaddr[0] << 8) | macaddr[1], + MTK_GDMA_MAC_ADRH(mac->id)); + mtk_w32(mac->hw, (macaddr[2] << 24) | (macaddr[3] << 16) | + (macaddr[4] << 8) | macaddr[5], + MTK_GDMA_MAC_ADRL(mac->id)); + } + spin_unlock_bh(&mac->hw->page_lock); + + return 0; +} + +void mtk_stats_update_mac(struct 
mtk_mac *mac) +{ + struct mtk_hw_stats *hw_stats = mac->hw_stats; + struct mtk_eth *eth = mac->hw; + + u64_stats_update_begin(&hw_stats->syncp); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + hw_stats->tx_packets += mtk_r32(mac->hw, MT7628_SDM_TPCNT); + hw_stats->tx_bytes += mtk_r32(mac->hw, MT7628_SDM_TBCNT); + hw_stats->rx_packets += mtk_r32(mac->hw, MT7628_SDM_RPCNT); + hw_stats->rx_bytes += mtk_r32(mac->hw, MT7628_SDM_RBCNT); + hw_stats->rx_checksum_errors += + mtk_r32(mac->hw, MT7628_SDM_CS_ERR); + } else { + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + unsigned int offs = hw_stats->reg_offset; + u64 stats; + + hw_stats->rx_bytes += mtk_r32(mac->hw, reg_map->gdm1_cnt + offs); + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x4 + offs); + if (stats) + hw_stats->rx_bytes += (stats << 32); + hw_stats->rx_packets += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x8 + offs); + hw_stats->rx_overflow += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x10 + offs); + hw_stats->rx_fcs_errors += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x14 + offs); + hw_stats->rx_short_errors += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x18 + offs); + hw_stats->rx_long_errors += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x1c + offs); + hw_stats->rx_checksum_errors += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x20 + offs); + hw_stats->rx_flow_control_packets += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x24 + offs); + hw_stats->tx_skip += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x28 + offs); + hw_stats->tx_collisions += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x2c + offs); + hw_stats->tx_bytes += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x30 + offs); + stats = mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x34 + offs); + if (stats) + hw_stats->tx_bytes += (stats << 32); + hw_stats->tx_packets += + mtk_r32(mac->hw, reg_map->gdm1_cnt + 0x38 + offs); + } + + u64_stats_update_end(&hw_stats->syncp); +} + +static void mtk_stats_update(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->mac[i] || !eth->mac[i]->hw_stats) + continue; + if (spin_trylock(ð->mac[i]->hw_stats->stats_lock)) { + mtk_stats_update_mac(eth->mac[i]); + spin_unlock(ð->mac[i]->hw_stats->stats_lock); + } + } +} + +static void mtk_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; + unsigned int start; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock_bh(&hw_stats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock_bh(&hw_stats->stats_lock); + } + } + + do { + start = u64_stats_fetch_begin_irq(&hw_stats->syncp); + storage->rx_packets = hw_stats->rx_packets; + storage->tx_packets = hw_stats->tx_packets; + storage->rx_bytes = hw_stats->rx_bytes; + storage->tx_bytes = hw_stats->tx_bytes; + storage->collisions = hw_stats->tx_collisions; + storage->rx_length_errors = hw_stats->rx_short_errors + + hw_stats->rx_long_errors; + storage->rx_over_errors = hw_stats->rx_overflow; + storage->rx_crc_errors = hw_stats->rx_fcs_errors; + storage->rx_errors = hw_stats->rx_checksum_errors; + storage->tx_aborted_errors = hw_stats->tx_skip; + } while (u64_stats_fetch_retry_irq(&hw_stats->syncp, start)); + + storage->tx_errors = dev->stats.tx_errors; + storage->rx_dropped = dev->stats.rx_dropped; + storage->tx_dropped = dev->stats.tx_dropped; +} + +static inline int mtk_max_frag_size(int mtu) +{ + /* make sure buf_size will be at least MTK_MAX_RX_LENGTH */ + if (mtu + MTK_RX_ETH_HLEN < MTK_MAX_RX_LENGTH_2K) + 
mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; + + return SKB_DATA_ALIGN(MTK_RX_HLEN + mtu) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +} + +static inline int mtk_max_buf_size(int frag_size) +{ + int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + + WARN_ON(buf_size < MTK_MAX_RX_LENGTH_2K); + + return buf_size; +} + +static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, + struct mtk_rx_dma_v2 *dma_rxd) +{ + rxd->rxd2 = READ_ONCE(dma_rxd->rxd2); + if (!(rxd->rxd2 & RX_DMA_DONE)) + return false; + + rxd->rxd1 = READ_ONCE(dma_rxd->rxd1); + rxd->rxd3 = READ_ONCE(dma_rxd->rxd3); + rxd->rxd4 = READ_ONCE(dma_rxd->rxd4); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + rxd->rxd5 = READ_ONCE(dma_rxd->rxd5); + rxd->rxd6 = READ_ONCE(dma_rxd->rxd6); + } + + return true; +} + +static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask) +{ + unsigned int size = mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH); + unsigned long data; + + data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN, + get_order(size)); + + return (void *)data; +} + +/* the qdma core needs scratch memory to be setup */ +static int mtk_init_fq_dma(struct mtk_eth *eth) +{ + const struct mtk_soc_data *soc = eth->soc; + dma_addr_t phy_ring_tail; + int cnt = MTK_DMA_SIZE; + dma_addr_t dma_addr; + int i; + + eth->scratch_ring = dma_alloc_coherent(eth->dma_dev, + cnt * soc->txrx.txd_size, + ð->phy_scratch_ring, + GFP_KERNEL); + if (unlikely(!eth->scratch_ring)) + return -ENOMEM; + + eth->scratch_head = kcalloc(cnt, MTK_QDMA_PAGE_SIZE, GFP_KERNEL); + if (unlikely(!eth->scratch_head)) + return -ENOMEM; + + dma_addr = dma_map_single(eth->dma_dev, + eth->scratch_head, cnt * MTK_QDMA_PAGE_SIZE, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, dma_addr))) + return -ENOMEM; + + phy_ring_tail = eth->phy_scratch_ring + soc->txrx.txd_size * (cnt - 1); + + for (i = 0; i < cnt; i++) { + struct mtk_tx_dma_v2 *txd; + + txd = eth->scratch_ring + i * soc->txrx.txd_size; + txd->txd1 = dma_addr + i * MTK_QDMA_PAGE_SIZE; + if (i < cnt - 1) + txd->txd2 = eth->phy_scratch_ring + + (i + 1) * soc->txrx.txd_size; + + txd->txd3 = TX_DMA_PLEN0(MTK_QDMA_PAGE_SIZE); + txd->txd4 = 0; + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } + } + + mtk_w32(eth, eth->phy_scratch_ring, soc->reg_map->qdma.fq_head); + mtk_w32(eth, phy_ring_tail, soc->reg_map->qdma.fq_tail); + mtk_w32(eth, (cnt << 16) | cnt, soc->reg_map->qdma.fq_count); + mtk_w32(eth, MTK_QDMA_PAGE_SIZE << 16, soc->reg_map->qdma.fq_blen); + + return 0; +} + +static void *mtk_qdma_phys_to_virt(struct mtk_tx_ring *ring, u32 desc) +{ + return ring->dma + (desc - ring->phys); +} + +static struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring, + void *txd, u32 txd_size) +{ + int idx = (txd - ring->dma) / txd_size; + + return &ring->buf[idx]; +} + +static struct mtk_tx_dma *qdma_to_pdma(struct mtk_tx_ring *ring, + struct mtk_tx_dma *dma) +{ + return ring->dma_pdma - (struct mtk_tx_dma *)ring->dma + dma; +} + +static int txd_to_idx(struct mtk_tx_ring *ring, void *dma, u32 txd_size) +{ + return (dma - ring->dma) / txd_size; +} + +static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + struct xdp_frame_bulk *bq, bool napi) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) { + dma_unmap_single(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + 
DMA_TO_DEVICE); + } else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) { + dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + } else { + if (dma_unmap_len(tx_buf, dma_len0)) { + dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr0), + dma_unmap_len(tx_buf, dma_len0), + DMA_TO_DEVICE); + } + + if (dma_unmap_len(tx_buf, dma_len1)) { + dma_unmap_page(eth->dma_dev, + dma_unmap_addr(tx_buf, dma_addr1), + dma_unmap_len(tx_buf, dma_len1), + DMA_TO_DEVICE); + } + } + + if (tx_buf->data && tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { + if (tx_buf->type == MTK_TYPE_SKB) { + struct sk_buff *skb = tx_buf->data; + + if (napi) + napi_consume_skb(skb, napi); + else + dev_kfree_skb_any(skb); + } else { + struct xdp_frame *xdpf = tx_buf->data; + + if (napi && tx_buf->type == MTK_TYPE_XDP_TX) + xdp_return_frame_rx_napi(xdpf); + else if (bq) + xdp_return_frame_bulk(xdpf, bq); + else + xdp_return_frame(xdpf); + } + } + tx_buf->flags = 0; + tx_buf->data = NULL; +} + +static void setup_tx_buf(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf, + struct mtk_tx_dma *txd, dma_addr_t mapped_addr, + size_t size, int idx) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } else { + if (idx & 1) { + txd->txd3 = mapped_addr; + txd->txd2 |= TX_DMA_PLEN1(size); + dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len1, size); + } else { + tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; + txd->txd1 = mapped_addr; + txd->txd2 = TX_DMA_PLEN0(size); + dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); + dma_unmap_len_set(tx_buf, dma_len0, size); + } + } +} + +static void mtk_tx_set_dma_desc_v1(struct net_device *dev, void *txd, + struct mtk_tx_dma_desc_info *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_dma *desc = txd; + u32 data; + + WRITE_ONCE(desc->txd1, info->addr); + + data = TX_DMA_SWC | TX_DMA_PLEN0(info->size); + if (info->last) + data |= TX_DMA_LS0; + WRITE_ONCE(desc->txd3, data); + + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT; /* forward port */ + if (info->first) { + if (info->gso) + data |= TX_DMA_TSO; + /* tx checksum offload */ + if (info->csum) + data |= TX_DMA_CHKSUM; + /* vlan header offload */ + if (info->vlan) + data |= TX_DMA_INS_VLAN | info->vlan_tci; + } + WRITE_ONCE(desc->txd4, data); +} + +static void mtk_tx_set_dma_desc_v2(struct net_device *dev, void *txd, + struct mtk_tx_dma_desc_info *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_tx_dma_v2 *desc = txd; + struct mtk_eth *eth = mac->hw; + u32 data; + + WRITE_ONCE(desc->txd1, info->addr); + + data = TX_DMA_PLEN0(info->size); + if (info->last) + data |= TX_DMA_LS0; + WRITE_ONCE(desc->txd3, data); + + if (!info->qid && mac->id) + info->qid = MTK_QDMA_GMAC2_QID; + + data = (mac->id + 1) << TX_DMA_FPORT_SHIFT_V2; /* forward port */ + data |= TX_DMA_SWC_V2 | QID_BITS_V2(info->qid); + WRITE_ONCE(desc->txd4, data); + + data = 0; + if (info->first) { + if (info->gso) + data |= TX_DMA_TSO_V2; + /* tx checksum offload */ + if (info->csum) + data |= TX_DMA_CHKSUM_V2; + } + WRITE_ONCE(desc->txd5, data); + + data = 0; + if (info->first && info->vlan) + data |= TX_DMA_INS_VLAN_V2 | info->vlan_tci; + WRITE_ONCE(desc->txd6, data); + + WRITE_ONCE(desc->txd7, 0); + WRITE_ONCE(desc->txd8, 0); +} + +static void mtk_tx_set_dma_desc(struct net_device *dev, void *txd, + struct mtk_tx_dma_desc_info 
*info) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mtk_tx_set_dma_desc_v2(dev, txd, info); + else + mtk_tx_set_dma_desc_v1(dev, txd, info); +} + +static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev, + int tx_num, struct mtk_tx_ring *ring, bool gso) +{ + struct mtk_tx_dma_desc_info txd_info = { + .size = skb_headlen(skb), + .gso = gso, + .csum = skb->ip_summed == CHECKSUM_PARTIAL, + .vlan = skb_vlan_tag_present(skb), + .qid = skb->mark & MTK_QDMA_TX_MASK, + .vlan_tci = skb_vlan_tag_get(skb), + .first = true, + .last = !skb_is_nonlinear(skb), + }; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + const struct mtk_soc_data *soc = eth->soc; + struct mtk_tx_dma *itxd, *txd; + struct mtk_tx_dma *itxd_pdma, *txd_pdma; + struct mtk_tx_buf *itx_buf, *tx_buf; + int i, n_desc = 1; + int k = 0; + + itxd = ring->next_free; + itxd_pdma = qdma_to_pdma(ring, itxd); + if (itxd == ring->last_free) + return -ENOMEM; + + itx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); + memset(itx_buf, 0, sizeof(*itx_buf)); + + txd_info.addr = dma_map_single(eth->dma_dev, skb->data, txd_info.size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) + return -ENOMEM; + + mtk_tx_set_dma_desc(dev, itxd, &txd_info); + + itx_buf->flags |= MTK_TX_FLAGS_SINGLE0; + itx_buf->flags |= (!mac->id) ? MTK_TX_FLAGS_FPORT0 : + MTK_TX_FLAGS_FPORT1; + setup_tx_buf(eth, itx_buf, itxd_pdma, txd_info.addr, txd_info.size, + k++); + + /* TX SG offload */ + txd = itxd; + txd_pdma = qdma_to_pdma(ring, txd); + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + unsigned int offset = 0; + int frag_size = skb_frag_size(frag); + + while (frag_size) { + bool new_desc = true; + + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || + (i & 0x1)) { + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + txd_pdma = qdma_to_pdma(ring, txd); + if (txd == ring->last_free) + goto err_dma; + + n_desc++; + } else { + new_desc = false; + } + + memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); + txd_info.size = min_t(unsigned int, frag_size, + soc->txrx.dma_max_len); + txd_info.qid = skb->mark & MTK_QDMA_TX_MASK; + txd_info.last = i == skb_shinfo(skb)->nr_frags - 1 && + !(frag_size - txd_info.size); + txd_info.addr = skb_frag_dma_map(eth->dma_dev, frag, + offset, txd_info.size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info.addr))) + goto err_dma; + + mtk_tx_set_dma_desc(dev, txd, &txd_info); + + tx_buf = mtk_desc_to_tx_buf(ring, txd, + soc->txrx.txd_size); + if (new_desc) + memset(tx_buf, 0, sizeof(*tx_buf)); + tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; + tx_buf->flags |= MTK_TX_FLAGS_PAGE0; + tx_buf->flags |= (!mac->id) ? 
MTK_TX_FLAGS_FPORT0 : + MTK_TX_FLAGS_FPORT1; + + setup_tx_buf(eth, tx_buf, txd_pdma, txd_info.addr, + txd_info.size, k++); + + frag_size -= txd_info.size; + offset += txd_info.size; + } + } + + /* store skb to cleanup */ + itx_buf->type = MTK_TYPE_SKB; + itx_buf->data = skb; + + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + if (k & 0x1) + txd_pdma->txd2 |= TX_DMA_LS0; + else + txd_pdma->txd2 |= TX_DMA_LS1; + } + + netdev_sent_queue(dev, skb->len); + skb_tx_timestamp(skb); + + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); + atomic_sub(n_desc, &ring->free_count); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || + !netdev_xmit_more()) + mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); + } else { + int next_idx; + + next_idx = NEXT_DESP_IDX(txd_to_idx(ring, txd, soc->txrx.txd_size), + ring->dma_size); + mtk_w32(eth, next_idx, MT7628_TX_CTX_IDX0); + } + + return 0; + +err_dma: + do { + tx_buf = mtk_desc_to_tx_buf(ring, itxd, soc->txrx.txd_size); + + /* unmap dma */ + mtk_tx_unmap(eth, tx_buf, NULL, false); + + itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) + itxd_pdma->txd2 = TX_DMA_DESP2_DEF; + + itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2); + itxd_pdma = qdma_to_pdma(ring, itxd); + } while (itxd != txd); + + return -ENOMEM; +} + +static int mtk_cal_txd_req(struct mtk_eth *eth, struct sk_buff *skb) +{ + int i, nfrags = 1; + skb_frag_t *frag; + + if (skb_is_gso(skb)) { + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + frag = &skb_shinfo(skb)->frags[i]; + nfrags += DIV_ROUND_UP(skb_frag_size(frag), + eth->soc->txrx.dma_max_len); + } + } else { + nfrags += skb_shinfo(skb)->nr_frags; + } + + return nfrags; +} + +static int mtk_queue_stopped(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + if (netif_queue_stopped(eth->netdev[i])) + return 1; + } + + return 0; +} + +static void mtk_wake_queue(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + netif_wake_queue(eth->netdev[i]); + } +} + +static netdev_tx_t mtk_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct mtk_tx_ring *ring = ð->tx_ring; + struct net_device_stats *stats = &dev->stats; + bool gso = false; + int tx_num; + + /* normally we can rely on the stack not calling this more than once, + * however we have 2 queues running on the same ring so we need to lock + * the ring access + */ + spin_lock(ð->page_lock); + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto drop; + + tx_num = mtk_cal_txd_req(eth, skb); + if (unlikely(atomic_read(&ring->free_count) <= tx_num)) { + netif_stop_queue(dev); + netif_err(eth, tx_queued, dev, + "Tx Ring full when queue awake!\n"); + spin_unlock(ð->page_lock); + return NETDEV_TX_BUSY; + } + + /* TSO: fill MSS info in tcp checksum field */ + if (skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) { + netif_warn(eth, tx_err, dev, + "GSO expand head fail.\n"); + goto drop; + } + + if (skb_shinfo(skb)->gso_type & + (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + gso = true; + tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); + } + } + + if (mtk_tx_map(skb, dev, tx_num, ring, gso) < 0) + goto drop; + + if (unlikely(atomic_read(&ring->free_count) <= ring->thresh)) + netif_stop_queue(dev); + + 
spin_unlock(ð->page_lock); + + return NETDEV_TX_OK; + +drop: + spin_unlock(ð->page_lock); + stats->tx_dropped++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +} + +static struct mtk_rx_ring *mtk_get_rx_ring(struct mtk_eth *eth) +{ + int i; + struct mtk_rx_ring *ring; + int idx; + + if (!eth->hwlro) + return ð->rx_ring[0]; + + for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + struct mtk_rx_dma *rxd; + + ring = ð->rx_ring[i]; + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); + rxd = ring->dma + idx * eth->soc->txrx.rxd_size; + if (rxd->rxd2 & RX_DMA_DONE) { + ring->calc_idx_update = true; + return ring; + } + } + + return NULL; +} + +static void mtk_update_rx_cpu_idx(struct mtk_eth *eth) +{ + struct mtk_rx_ring *ring; + int i; + + if (!eth->hwlro) { + ring = ð->rx_ring[0]; + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + } else { + for (i = 0; i < MTK_MAX_RX_RING_NUM; i++) { + ring = ð->rx_ring[i]; + if (ring->calc_idx_update) { + ring->calc_idx_update = false; + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + } + } + } +} + +static bool mtk_page_pool_enabled(struct mtk_eth *eth) +{ + return MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2); +} + +static struct page_pool *mtk_create_page_pool(struct mtk_eth *eth, + struct xdp_rxq_info *xdp_q, + int id, int size) +{ + struct page_pool_params pp_params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .pool_size = size, + .nid = NUMA_NO_NODE, + .dev = eth->dma_dev, + .offset = MTK_PP_HEADROOM, + .max_len = MTK_PP_MAX_BUF_SIZE, + }; + struct page_pool *pp; + int err; + + pp_params.dma_dir = rcu_access_pointer(eth->prog) ? DMA_BIDIRECTIONAL + : DMA_FROM_DEVICE; + pp = page_pool_create(&pp_params); + if (IS_ERR(pp)) + return pp; + + err = __xdp_rxq_info_reg(xdp_q, ð->dummy_dev, id, + eth->rx_napi.napi_id, PAGE_SIZE); + if (err < 0) + goto err_free_pp; + + err = xdp_rxq_info_reg_mem_model(xdp_q, MEM_TYPE_PAGE_POOL, pp); + if (err) + goto err_unregister_rxq; + + return pp; + +err_unregister_rxq: + xdp_rxq_info_unreg(xdp_q); +err_free_pp: + page_pool_destroy(pp); + + return ERR_PTR(err); +} + +static void *mtk_page_pool_get_buff(struct page_pool *pp, dma_addr_t *dma_addr, + gfp_t gfp_mask) +{ + struct page *page; + + page = page_pool_alloc_pages(pp, gfp_mask | __GFP_NOWARN); + if (!page) + return NULL; + + *dma_addr = page_pool_get_dma_addr(page) + MTK_PP_HEADROOM; + return page_address(page); +} + +static void mtk_rx_put_buff(struct mtk_rx_ring *ring, void *data, bool napi) +{ + if (ring->page_pool) + page_pool_put_full_page(ring->page_pool, + virt_to_head_page(data), napi); + else + skb_free_frag(data); +} + +static int mtk_xdp_frame_map(struct mtk_eth *eth, struct net_device *dev, + struct mtk_tx_dma_desc_info *txd_info, + struct mtk_tx_dma *txd, struct mtk_tx_buf *tx_buf, + void *data, u16 headroom, int index, bool dma_map) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_tx_dma *txd_pdma; + + if (dma_map) { /* ndo_xdp_xmit */ + txd_info->addr = dma_map_single(eth->dma_dev, data, + txd_info->size, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, txd_info->addr))) + return -ENOMEM; + + tx_buf->flags |= MTK_TX_FLAGS_SINGLE0; + } else { + struct page *page = virt_to_head_page(data); + + txd_info->addr = page_pool_get_dma_addr(page) + + sizeof(struct xdp_frame) + headroom; + dma_sync_single_for_device(eth->dma_dev, txd_info->addr, + txd_info->size, DMA_BIDIRECTIONAL); + } + mtk_tx_set_dma_desc(dev, txd, txd_info); + + tx_buf->flags |= !mac->id ? 
MTK_TX_FLAGS_FPORT0 : MTK_TX_FLAGS_FPORT1; + tx_buf->type = dma_map ? MTK_TYPE_XDP_NDO : MTK_TYPE_XDP_TX; + tx_buf->data = (void *)MTK_DMA_DUMMY_DESC; + + txd_pdma = qdma_to_pdma(ring, txd); + setup_tx_buf(eth, tx_buf, txd_pdma, txd_info->addr, txd_info->size, + index); + + return 0; +} + +static int mtk_xdp_submit_frame(struct mtk_eth *eth, struct xdp_frame *xdpf, + struct net_device *dev, bool dma_map) +{ + struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf); + const struct mtk_soc_data *soc = eth->soc; + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_dma_desc_info txd_info = { + .size = xdpf->len, + .first = true, + .last = !xdp_frame_has_frags(xdpf), + }; + int err, index = 0, n_desc = 1, nr_frags; + struct mtk_tx_buf *htx_buf, *tx_buf; + struct mtk_tx_dma *htxd, *txd; + void *data = xdpf->data; + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + return -EBUSY; + + nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0; + if (unlikely(atomic_read(&ring->free_count) <= 1 + nr_frags)) + return -EBUSY; + + spin_lock(ð->page_lock); + + txd = ring->next_free; + if (txd == ring->last_free) { + spin_unlock(ð->page_lock); + return -ENOMEM; + } + htxd = txd; + + tx_buf = mtk_desc_to_tx_buf(ring, txd, soc->txrx.txd_size); + memset(tx_buf, 0, sizeof(*tx_buf)); + htx_buf = tx_buf; + + for (;;) { + err = mtk_xdp_frame_map(eth, dev, &txd_info, txd, tx_buf, + data, xdpf->headroom, index, dma_map); + if (err < 0) + goto unmap; + + if (txd_info.last) + break; + + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA) || (index & 0x1)) { + txd = mtk_qdma_phys_to_virt(ring, txd->txd2); + if (txd == ring->last_free) + goto unmap; + + tx_buf = mtk_desc_to_tx_buf(ring, txd, + soc->txrx.txd_size); + memset(tx_buf, 0, sizeof(*tx_buf)); + n_desc++; + } + + memset(&txd_info, 0, sizeof(struct mtk_tx_dma_desc_info)); + txd_info.size = skb_frag_size(&sinfo->frags[index]); + txd_info.last = index + 1 == nr_frags; + data = skb_frag_address(&sinfo->frags[index]); + + index++; + } + /* store xdpf for cleanup */ + htx_buf->data = xdpf; + + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, txd); + + if (index & 1) + txd_pdma->txd2 |= TX_DMA_LS0; + else + txd_pdma->txd2 |= TX_DMA_LS1; + } + + ring->next_free = mtk_qdma_phys_to_virt(ring, txd->txd2); + atomic_sub(n_desc, &ring->free_count); + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + mtk_w32(eth, txd->txd2, soc->reg_map->qdma.ctx_ptr); + } else { + int idx; + + idx = txd_to_idx(ring, txd, soc->txrx.txd_size); + mtk_w32(eth, NEXT_DESP_IDX(idx, ring->dma_size), + MT7628_TX_CTX_IDX0); + } + + spin_unlock(ð->page_lock); + + return 0; + +unmap: + while (htxd != txd) { + tx_buf = mtk_desc_to_tx_buf(ring, htxd, soc->txrx.txd_size); + mtk_tx_unmap(eth, tx_buf, NULL, false); + + htxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + struct mtk_tx_dma *txd_pdma = qdma_to_pdma(ring, htxd); + + txd_pdma->txd2 = TX_DMA_DESP2_DEF; + } + + htxd = mtk_qdma_phys_to_virt(ring, htxd->txd2); + } + + spin_unlock(ð->page_lock); + + return err; +} + +static int mtk_xdp_xmit(struct net_device *dev, int num_frame, + struct xdp_frame **frames, u32 flags) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; + struct mtk_eth *eth = mac->hw; + int i, nxmit = 0; + + if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) + return -EINVAL; + + for (i = 0; i < num_frame; 
i++) { + if (mtk_xdp_submit_frame(eth, frames[i], dev, true)) + break; + nxmit++; + } + + u64_stats_update_begin(&hw_stats->syncp); + hw_stats->xdp_stats.tx_xdp_xmit += nxmit; + hw_stats->xdp_stats.tx_xdp_xmit_errors += num_frame - nxmit; + u64_stats_update_end(&hw_stats->syncp); + + return nxmit; +} + +static u32 mtk_xdp_run(struct mtk_eth *eth, struct mtk_rx_ring *ring, + struct xdp_buff *xdp, struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hw_stats = mac->hw_stats; + u64 *count = &hw_stats->xdp_stats.rx_xdp_drop; + struct bpf_prog *prog; + u32 act = XDP_PASS; + + rcu_read_lock(); + + prog = rcu_dereference(eth->prog); + if (!prog) + goto out; + + act = bpf_prog_run_xdp(prog, xdp); + switch (act) { + case XDP_PASS: + count = &hw_stats->xdp_stats.rx_xdp_pass; + goto update_stats; + case XDP_REDIRECT: + if (unlikely(xdp_do_redirect(dev, xdp, prog))) { + act = XDP_DROP; + break; + } + + count = &hw_stats->xdp_stats.rx_xdp_redirect; + goto update_stats; + case XDP_TX: { + struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp); + + if (!xdpf || mtk_xdp_submit_frame(eth, xdpf, dev, false)) { + count = &hw_stats->xdp_stats.rx_xdp_tx_errors; + act = XDP_DROP; + break; + } + + count = &hw_stats->xdp_stats.rx_xdp_tx; + goto update_stats; + } + default: + bpf_warn_invalid_xdp_action(dev, prog, act); + fallthrough; + case XDP_ABORTED: + trace_xdp_exception(dev, prog, act); + fallthrough; + case XDP_DROP: + break; + } + + page_pool_put_full_page(ring->page_pool, + virt_to_head_page(xdp->data), true); + +update_stats: + u64_stats_update_begin(&hw_stats->syncp); + *count = *count + 1; + u64_stats_update_end(&hw_stats->syncp); +out: + rcu_read_unlock(); + + return act; +} + +static int mtk_poll_rx(struct napi_struct *napi, int budget, + struct mtk_eth *eth) +{ + struct dim_sample dim_sample = {}; + struct mtk_rx_ring *ring; + bool xdp_flush = false; + int idx; + struct sk_buff *skb; + u8 *data, *new_data; + struct mtk_rx_dma_v2 *rxd, trxd; + int done = 0, bytes = 0; + + while (done < budget) { + unsigned int pktlen, *rxdcsum; + struct net_device *netdev; + dma_addr_t dma_addr; + u32 hash, reason; + int mac = 0; + + ring = mtk_get_rx_ring(eth); + if (unlikely(!ring)) + goto rx_done; + + idx = NEXT_DESP_IDX(ring->calc_idx, ring->dma_size); + rxd = ring->dma + idx * eth->soc->txrx.rxd_size; + data = ring->data[idx]; + + if (!mtk_rx_get_desc(eth, &trxd, rxd)) + break; + + /* find out which mac the packet come from. 
values start at 1 */ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + mac = RX_DMA_GET_SPORT_V2(trxd.rxd5) - 1; + else if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628) && + !(trxd.rxd4 & RX_DMA_SPECIAL_TAG)) + mac = RX_DMA_GET_SPORT(trxd.rxd4) - 1; + + if (unlikely(mac < 0 || mac >= MTK_MAC_COUNT || + !eth->netdev[mac])) + goto release_desc; + + netdev = eth->netdev[mac]; + + if (unlikely(test_bit(MTK_RESETTING, ð->state))) + goto release_desc; + + pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); + + /* alloc new buffer */ + if (ring->page_pool) { + struct page *page = virt_to_head_page(data); + struct xdp_buff xdp; + u32 ret; + + new_data = mtk_page_pool_get_buff(ring->page_pool, + &dma_addr, + GFP_ATOMIC); + if (unlikely(!new_data)) { + netdev->stats.rx_dropped++; + goto release_desc; + } + + dma_sync_single_for_cpu(eth->dma_dev, + page_pool_get_dma_addr(page) + MTK_PP_HEADROOM, + pktlen, page_pool_get_dma_dir(ring->page_pool)); + + xdp_init_buff(&xdp, PAGE_SIZE, &ring->xdp_q); + xdp_prepare_buff(&xdp, data, MTK_PP_HEADROOM, pktlen, + false); + xdp_buff_clear_frags_flag(&xdp); + + ret = mtk_xdp_run(eth, ring, &xdp, netdev); + if (ret == XDP_REDIRECT) + xdp_flush = true; + + if (ret != XDP_PASS) + goto skip_rx; + + skb = build_skb(data, PAGE_SIZE); + if (unlikely(!skb)) { + page_pool_put_full_page(ring->page_pool, + page, true); + netdev->stats.rx_dropped++; + goto skip_rx; + } + + skb_reserve(skb, xdp.data - xdp.data_hard_start); + skb_put(skb, xdp.data_end - xdp.data); + skb_mark_for_recycle(skb); + } else { + if (ring->frag_size <= PAGE_SIZE) + new_data = napi_alloc_frag(ring->frag_size); + else + new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); + + if (unlikely(!new_data)) { + netdev->stats.rx_dropped++; + goto release_desc; + } + + dma_addr = dma_map_single(eth->dma_dev, + new_data + NET_SKB_PAD + eth->ip_align, + ring->buf_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, + dma_addr))) { + skb_free_frag(new_data); + netdev->stats.rx_dropped++; + goto release_desc; + } + + dma_unmap_single(eth->dma_dev, trxd.rxd1, + ring->buf_size, DMA_FROM_DEVICE); + + skb = build_skb(data, ring->frag_size); + if (unlikely(!skb)) { + netdev->stats.rx_dropped++; + skb_free_frag(data); + goto skip_rx; + } + + skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); + skb_put(skb, pktlen); + } + + skb->dev = netdev; + bytes += skb->len; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + reason = FIELD_GET(MTK_RXD5_PPE_CPU_REASON, trxd.rxd5); + hash = trxd.rxd5 & MTK_RXD5_FOE_ENTRY; + if (hash != MTK_RXD5_FOE_ENTRY) + skb_set_hash(skb, jhash_1word(hash, 0), + PKT_HASH_TYPE_L4); + rxdcsum = &trxd.rxd3; + } else { + reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4); + hash = trxd.rxd4 & MTK_RXD4_FOE_ENTRY; + if (hash != MTK_RXD4_FOE_ENTRY) + skb_set_hash(skb, jhash_1word(hash, 0), + PKT_HASH_TYPE_L4); + rxdcsum = &trxd.rxd4; + } + + if (*rxdcsum & eth->soc->txrx.rx_dma_l4_valid) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, netdev); + + if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) + mtk_ppe_check_skb(eth->ppe[0], skb, hash); + + if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + if (trxd.rxd3 & RX_DMA_VTAG_V2) + __vlan_hwaccel_put_tag(skb, + htons(RX_DMA_VPID(trxd.rxd4)), + RX_DMA_VID(trxd.rxd4)); + } else if (trxd.rxd2 & RX_DMA_VTAG) { + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + RX_DMA_VID(trxd.rxd3)); + } + + /* If the device is attached to a dsa 
switch, the special + * tag inserted in VLAN field by hw switch can * be offloaded + * by RX HW VLAN offload. Clear vlan info. + */ + if (netdev_uses_dsa(netdev)) + __vlan_hwaccel_clear_tag(skb); + } + + skb_record_rx_queue(skb, 0); + napi_gro_receive(napi, skb); + +skip_rx: + ring->data[idx] = new_data; + rxd->rxd1 = (unsigned int)dma_addr; +release_desc: + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + rxd->rxd2 = RX_DMA_LSO; + else + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); + + ring->calc_idx = idx; + done++; + } + +rx_done: + if (done) { + /* make sure that all changes to the dma ring are flushed before + * we continue + */ + wmb(); + mtk_update_rx_cpu_idx(eth); + } + + eth->rx_packets += done; + eth->rx_bytes += bytes; + dim_update_sample(eth->rx_events, eth->rx_packets, eth->rx_bytes, + &dim_sample); + net_dim(ð->rx_dim, dim_sample); + + if (xdp_flush) + xdp_do_flush_map(); + + return done; +} + +static int mtk_poll_tx_qdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) +{ + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_buf *tx_buf; + struct xdp_frame_bulk bq; + struct mtk_tx_dma *desc; + u32 cpu, dma; + + cpu = ring->last_free_ptr; + dma = mtk_r32(eth, reg_map->qdma.drx_ptr); + + desc = mtk_qdma_phys_to_virt(ring, cpu); + xdp_frame_bulk_init(&bq); + + while ((cpu != dma) && budget) { + u32 next_cpu = desc->txd2; + int mac = 0; + + desc = mtk_qdma_phys_to_virt(ring, desc->txd2); + if ((desc->txd3 & TX_DMA_OWNER_CPU) == 0) + break; + + tx_buf = mtk_desc_to_tx_buf(ring, desc, + eth->soc->txrx.txd_size); + if (tx_buf->flags & MTK_TX_FLAGS_FPORT1) + mac = 1; + + if (!tx_buf->data) + break; + + if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { + if (tx_buf->type == MTK_TYPE_SKB) { + struct sk_buff *skb = tx_buf->data; + + bytes[mac] += skb->len; + done[mac]++; + } + budget--; + } + mtk_tx_unmap(eth, tx_buf, &bq, true); + + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = next_cpu; + } + xdp_flush_frame_bulk(&bq); + + ring->last_free_ptr = cpu; + mtk_w32(eth, cpu, reg_map->qdma.crx_ptr); + + return budget; +} + +static int mtk_poll_tx_pdma(struct mtk_eth *eth, int budget, + unsigned int *done, unsigned int *bytes) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct mtk_tx_buf *tx_buf; + struct xdp_frame_bulk bq; + struct mtk_tx_dma *desc; + u32 cpu, dma; + + cpu = ring->cpu_idx; + dma = mtk_r32(eth, MT7628_TX_DTX_IDX0); + xdp_frame_bulk_init(&bq); + + while ((cpu != dma) && budget) { + tx_buf = &ring->buf[cpu]; + if (!tx_buf->data) + break; + + if (tx_buf->data != (void *)MTK_DMA_DUMMY_DESC) { + if (tx_buf->type == MTK_TYPE_SKB) { + struct sk_buff *skb = tx_buf->data; + + bytes[0] += skb->len; + done[0]++; + } + budget--; + } + mtk_tx_unmap(eth, tx_buf, &bq, true); + + desc = ring->dma + cpu * eth->soc->txrx.txd_size; + ring->last_free = desc; + atomic_inc(&ring->free_count); + + cpu = NEXT_DESP_IDX(cpu, ring->dma_size); + } + xdp_flush_frame_bulk(&bq); + + ring->cpu_idx = cpu; + + return budget; +} + +static int mtk_poll_tx(struct mtk_eth *eth, int budget) +{ + struct mtk_tx_ring *ring = ð->tx_ring; + struct dim_sample dim_sample = {}; + unsigned int done[MTK_MAX_DEVS]; + unsigned int bytes[MTK_MAX_DEVS]; + int total = 0, i; + + memset(done, 0, sizeof(done)); + memset(bytes, 0, sizeof(bytes)); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + budget = mtk_poll_tx_qdma(eth, budget, done, bytes); + else + budget = mtk_poll_tx_pdma(eth, budget, done, bytes); + + for (i 
= 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i] || !done[i]) + continue; + netdev_completed_queue(eth->netdev[i], done[i], bytes[i]); + total += done[i]; + eth->tx_packets += done[i]; + eth->tx_bytes += bytes[i]; + } + + dim_update_sample(eth->tx_events, eth->tx_packets, eth->tx_bytes, + &dim_sample); + net_dim(ð->tx_dim, dim_sample); + + if (mtk_queue_stopped(eth) && + (atomic_read(&ring->free_count) > ring->thresh)) + mtk_wake_queue(eth); + + return total; +} + +static void mtk_handle_status_irq(struct mtk_eth *eth) +{ + u32 status2 = mtk_r32(eth, MTK_INT_STATUS2); + + if (unlikely(status2 & (MTK_GDM1_AF | MTK_GDM2_AF))) { + mtk_stats_update(eth); + mtk_w32(eth, (MTK_GDM1_AF | MTK_GDM2_AF), + MTK_INT_STATUS2); + } +} + +static int mtk_napi_tx(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, tx_napi); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + int tx_done = 0; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_handle_status_irq(eth); + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->tx_irq_status); + tx_done = mtk_poll_tx(eth, budget); + + if (unlikely(netif_msg_intr(eth))) { + dev_info(eth->dev, + "done tx %d, intr 0x%08x/0x%x\n", tx_done, + mtk_r32(eth, reg_map->tx_irq_status), + mtk_r32(eth, reg_map->tx_irq_mask)); + } + + if (tx_done == budget) + return budget; + + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) + return budget; + + if (napi_complete_done(napi, tx_done)) + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + + return tx_done; +} + +static int mtk_napi_rx(struct napi_struct *napi, int budget) +{ + struct mtk_eth *eth = container_of(napi, struct mtk_eth, rx_napi); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + int rx_done_total = 0; + + mtk_handle_status_irq(eth); + + do { + int rx_done; + + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, + reg_map->pdma.irq_status); + rx_done = mtk_poll_rx(napi, budget - rx_done_total, eth); + rx_done_total += rx_done; + + if (unlikely(netif_msg_intr(eth))) { + dev_info(eth->dev, + "done rx %d, intr 0x%08x/0x%x\n", rx_done, + mtk_r32(eth, reg_map->pdma.irq_status), + mtk_r32(eth, reg_map->pdma.irq_mask)); + } + + if (rx_done_total == budget) + return budget; + + } while (mtk_r32(eth, reg_map->pdma.irq_status) & + eth->soc->txrx.rx_irq_done_mask); + + if (napi_complete_done(napi, rx_done_total)) + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); + + return rx_done_total; +} + +static int mtk_tx_alloc(struct mtk_eth *eth) +{ + const struct mtk_soc_data *soc = eth->soc; + struct mtk_tx_ring *ring = ð->tx_ring; + int i, sz = soc->txrx.txd_size; + struct mtk_tx_dma_v2 *txd; + + ring->buf = kcalloc(MTK_DMA_SIZE, sizeof(*ring->buf), + GFP_KERNEL); + if (!ring->buf) + goto no_tx_mem; + + ring->dma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + &ring->phys, GFP_KERNEL); + if (!ring->dma) + goto no_tx_mem; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + int next = (i + 1) % MTK_DMA_SIZE; + u32 next_ptr = ring->phys + next * sz; + + txd = ring->dma + i * sz; + txd->txd2 = next_ptr; + txd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU; + txd->txd4 = 0; + if (MTK_HAS_CAPS(soc->caps, MTK_NETSYS_V2)) { + txd->txd5 = 0; + txd->txd6 = 0; + txd->txd7 = 0; + txd->txd8 = 0; + } + } + + /* On MT7688 (PDMA only) this driver uses the ring->dma structs + * only as the framework. The real HW descriptors are the PDMA + * descriptors in ring->dma_pdma. 
+ */ + if (!MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + ring->dma_pdma = dma_alloc_coherent(eth->dma_dev, MTK_DMA_SIZE * sz, + &ring->phys_pdma, GFP_KERNEL); + if (!ring->dma_pdma) + goto no_tx_mem; + + for (i = 0; i < MTK_DMA_SIZE; i++) { + ring->dma_pdma[i].txd2 = TX_DMA_DESP2_DEF; + ring->dma_pdma[i].txd4 = 0; + } + } + + ring->dma_size = MTK_DMA_SIZE; + atomic_set(&ring->free_count, MTK_DMA_SIZE - 2); + ring->next_free = ring->dma; + ring->last_free = (void *)txd; + ring->last_free_ptr = (u32)(ring->phys + ((MTK_DMA_SIZE - 1) * sz)); + ring->thresh = MAX_SKB_FRAGS; + + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (MTK_HAS_CAPS(soc->caps, MTK_QDMA)) { + mtk_w32(eth, ring->phys, soc->reg_map->qdma.ctx_ptr); + mtk_w32(eth, ring->phys, soc->reg_map->qdma.dtx_ptr); + mtk_w32(eth, + ring->phys + ((MTK_DMA_SIZE - 1) * sz), + soc->reg_map->qdma.crx_ptr); + mtk_w32(eth, ring->last_free_ptr, soc->reg_map->qdma.drx_ptr); + mtk_w32(eth, (QDMA_RES_THRES << 8) | QDMA_RES_THRES, + soc->reg_map->qdma.qtx_cfg); + } else { + mtk_w32(eth, ring->phys_pdma, MT7628_TX_BASE_PTR0); + mtk_w32(eth, MTK_DMA_SIZE, MT7628_TX_MAX_CNT0); + mtk_w32(eth, 0, MT7628_TX_CTX_IDX0); + mtk_w32(eth, MT7628_PST_DTX_IDX0, soc->reg_map->pdma.rst_idx); + } + + return 0; + +no_tx_mem: + return -ENOMEM; +} + +static void mtk_tx_clean(struct mtk_eth *eth) +{ + const struct mtk_soc_data *soc = eth->soc; + struct mtk_tx_ring *ring = ð->tx_ring; + int i; + + if (ring->buf) { + for (i = 0; i < MTK_DMA_SIZE; i++) + mtk_tx_unmap(eth, &ring->buf[i], NULL, false); + kfree(ring->buf); + ring->buf = NULL; + } + + if (ring->dma) { + dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * soc->txrx.txd_size, + ring->dma, ring->phys); + ring->dma = NULL; + } + + if (ring->dma_pdma) { + dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * soc->txrx.txd_size, + ring->dma_pdma, ring->phys_pdma); + ring->dma_pdma = NULL; + } +} + +static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) +{ + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + struct mtk_rx_ring *ring; + int rx_data_len, rx_dma_size; + int i; + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + if (ring_no) + return -EINVAL; + ring = ð->rx_ring_qdma; + } else { + ring = ð->rx_ring[ring_no]; + } + + if (rx_flag == MTK_RX_FLAGS_HWLRO) { + rx_data_len = MTK_MAX_LRO_RX_LENGTH; + rx_dma_size = MTK_HW_LRO_DMA_SIZE; + } else { + rx_data_len = ETH_DATA_LEN; + rx_dma_size = MTK_DMA_SIZE; + } + + ring->frag_size = mtk_max_frag_size(rx_data_len); + ring->buf_size = mtk_max_buf_size(ring->frag_size); + ring->data = kcalloc(rx_dma_size, sizeof(*ring->data), + GFP_KERNEL); + if (!ring->data) + return -ENOMEM; + + if (mtk_page_pool_enabled(eth)) { + struct page_pool *pp; + + pp = mtk_create_page_pool(eth, &ring->xdp_q, ring_no, + rx_dma_size); + if (IS_ERR(pp)) + return PTR_ERR(pp); + + ring->page_pool = pp; + } + + ring->dma = dma_alloc_coherent(eth->dma_dev, + rx_dma_size * eth->soc->txrx.rxd_size, + &ring->phys, GFP_KERNEL); + if (!ring->dma) + return -ENOMEM; + + for (i = 0; i < rx_dma_size; i++) { + struct mtk_rx_dma_v2 *rxd; + dma_addr_t dma_addr; + void *data; + + rxd = ring->dma + i * eth->soc->txrx.rxd_size; + if (ring->page_pool) { + data = mtk_page_pool_get_buff(ring->page_pool, + &dma_addr, GFP_KERNEL); + if (!data) + return -ENOMEM; + } else { + if (ring->frag_size <= PAGE_SIZE) + data = netdev_alloc_frag(ring->frag_size); + else + data = mtk_max_lro_buf_alloc(GFP_KERNEL); + + if (!data) + return -ENOMEM; + + dma_addr = 
dma_map_single(eth->dma_dev, + data + NET_SKB_PAD + eth->ip_align, + ring->buf_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(eth->dma_dev, + dma_addr))) { + skb_free_frag(data); + return -ENOMEM; + } + } + rxd->rxd1 = (unsigned int)dma_addr; + ring->data[i] = data; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + rxd->rxd2 = RX_DMA_LSO; + else + rxd->rxd2 = RX_DMA_PREP_PLEN0(ring->buf_size); + + rxd->rxd3 = 0; + rxd->rxd4 = 0; + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + rxd->rxd5 = 0; + rxd->rxd6 = 0; + rxd->rxd7 = 0; + rxd->rxd8 = 0; + } + } + + ring->dma_size = rx_dma_size; + ring->calc_idx_update = false; + ring->calc_idx = rx_dma_size - 1; + if (rx_flag == MTK_RX_FLAGS_QDMA) + ring->crx_idx_reg = reg_map->qdma.qcrx_ptr + + ring_no * MTK_QRX_OFFSET; + else + ring->crx_idx_reg = reg_map->pdma.pcrx_ptr + + ring_no * MTK_QRX_OFFSET; + /* make sure that all changes to the dma ring are flushed before we + * continue + */ + wmb(); + + if (rx_flag == MTK_RX_FLAGS_QDMA) { + mtk_w32(eth, ring->phys, + reg_map->qdma.rx_ptr + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, rx_dma_size, + reg_map->qdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), + reg_map->qdma.rst_idx); + } else { + mtk_w32(eth, ring->phys, + reg_map->pdma.rx_ptr + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, rx_dma_size, + reg_map->pdma.rx_cnt_cfg + ring_no * MTK_QRX_OFFSET); + mtk_w32(eth, MTK_PST_DRX_IDX_CFG(ring_no), + reg_map->pdma.rst_idx); + } + mtk_w32(eth, ring->calc_idx, ring->crx_idx_reg); + + return 0; +} + +static void mtk_rx_clean(struct mtk_eth *eth, struct mtk_rx_ring *ring) +{ + int i; + + if (ring->data && ring->dma) { + for (i = 0; i < ring->dma_size; i++) { + struct mtk_rx_dma *rxd; + + if (!ring->data[i]) + continue; + + rxd = ring->dma + i * eth->soc->txrx.rxd_size; + if (!rxd->rxd1) + continue; + + dma_unmap_single(eth->dma_dev, rxd->rxd1, + ring->buf_size, DMA_FROM_DEVICE); + mtk_rx_put_buff(ring, ring->data[i], false); + } + kfree(ring->data); + ring->data = NULL; + } + + if (ring->dma) { + dma_free_coherent(eth->dma_dev, + ring->dma_size * eth->soc->txrx.rxd_size, + ring->dma, ring->phys); + ring->dma = NULL; + } + + if (ring->page_pool) { + if (xdp_rxq_info_is_reg(&ring->xdp_q)) + xdp_rxq_info_unreg(&ring->xdp_q); + page_pool_destroy(ring->page_pool); + ring->page_pool = NULL; + } +} + +static int mtk_hwlro_rx_init(struct mtk_eth *eth) +{ + int i; + u32 ring_ctrl_dw1 = 0, ring_ctrl_dw2 = 0, ring_ctrl_dw3 = 0; + u32 lro_ctrl_dw0 = 0, lro_ctrl_dw3 = 0; + + /* set LRO rings to auto-learn modes */ + ring_ctrl_dw2 |= MTK_RING_AUTO_LERAN_MODE; + + /* validate LRO ring */ + ring_ctrl_dw2 |= MTK_RING_VLD; + + /* set AGE timer (unit: 20us) */ + ring_ctrl_dw2 |= MTK_RING_AGE_TIME_H; + ring_ctrl_dw1 |= MTK_RING_AGE_TIME_L; + + /* set max AGG timer (unit: 20us) */ + ring_ctrl_dw2 |= MTK_RING_MAX_AGG_TIME; + + /* set max LRO AGG count */ + ring_ctrl_dw2 |= MTK_RING_MAX_AGG_CNT_L; + ring_ctrl_dw3 |= MTK_RING_MAX_AGG_CNT_H; + + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { + mtk_w32(eth, ring_ctrl_dw1, MTK_LRO_CTRL_DW1_CFG(i)); + mtk_w32(eth, ring_ctrl_dw2, MTK_LRO_CTRL_DW2_CFG(i)); + mtk_w32(eth, ring_ctrl_dw3, MTK_LRO_CTRL_DW3_CFG(i)); + } + + /* IPv4 checksum update enable */ + lro_ctrl_dw0 |= MTK_L3_CKS_UPD_EN; + + /* switch priority comparison to packet count mode */ + lro_ctrl_dw0 |= MTK_LRO_ALT_PKT_CNT_MODE; + + /* bandwidth threshold setting */ + mtk_w32(eth, MTK_HW_LRO_BW_THRE, MTK_PDMA_LRO_CTRL_DW2); + + /* auto-learn score delta setting */ + 
mtk_w32(eth, MTK_HW_LRO_REPLACE_DELTA, MTK_PDMA_LRO_ALT_SCORE_DELTA); + + /* set refresh timer for altering flows to 1 sec. (unit: 20us) */ + mtk_w32(eth, (MTK_HW_LRO_TIMER_UNIT << 16) | MTK_HW_LRO_REFRESH_TIME, + MTK_PDMA_LRO_ALT_REFRESH_TIMER); + + /* set HW LRO mode & the max aggregation count for rx packets */ + lro_ctrl_dw3 |= MTK_ADMA_MODE | (MTK_HW_LRO_MAX_AGG_CNT & 0xff); + + /* the minimal remaining room of SDL0 in RXD for lro aggregation */ + lro_ctrl_dw3 |= MTK_LRO_MIN_RXD_SDL; + + /* enable HW LRO */ + lro_ctrl_dw0 |= MTK_LRO_EN; + + mtk_w32(eth, lro_ctrl_dw3, MTK_PDMA_LRO_CTRL_DW3); + mtk_w32(eth, lro_ctrl_dw0, MTK_PDMA_LRO_CTRL_DW0); + + return 0; +} + +static void mtk_hwlro_rx_uninit(struct mtk_eth *eth) +{ + int i; + u32 val; + + /* relinquish lro rings, flush aggregated packets */ + mtk_w32(eth, MTK_LRO_RING_RELINQUISH_REQ, MTK_PDMA_LRO_CTRL_DW0); + + /* wait for relinquishments done */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, MTK_PDMA_LRO_CTRL_DW0); + if (val & MTK_LRO_RING_RELINQUISH_DONE) { + msleep(20); + continue; + } + break; + } + + /* invalidate lro rings */ + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) + mtk_w32(eth, 0, MTK_LRO_CTRL_DW2_CFG(i)); + + /* disable HW LRO */ + mtk_w32(eth, 0, MTK_PDMA_LRO_CTRL_DW0); +} + +static void mtk_hwlro_val_ipaddr(struct mtk_eth *eth, int idx, __be32 ip) +{ + u32 reg_val; + + reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); + + /* invalidate the IP setting */ + mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + + mtk_w32(eth, ip, MTK_LRO_DIP_DW0_CFG(idx)); + + /* validate the IP setting */ + mtk_w32(eth, (reg_val | MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); +} + +static void mtk_hwlro_inval_ipaddr(struct mtk_eth *eth, int idx) +{ + u32 reg_val; + + reg_val = mtk_r32(eth, MTK_LRO_CTRL_DW2_CFG(idx)); + + /* invalidate the IP setting */ + mtk_w32(eth, (reg_val & ~MTK_RING_MYIP_VLD), MTK_LRO_CTRL_DW2_CFG(idx)); + + mtk_w32(eth, 0, MTK_LRO_DIP_DW0_CFG(idx)); +} + +static int mtk_hwlro_get_ip_cnt(struct mtk_mac *mac) +{ + int cnt = 0; + int i; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (mac->hwlro_ip[i]) + cnt++; + } + + return cnt; +} + +static int mtk_hwlro_add_ipaddr(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int hwlro_idx; + + if ((fsp->flow_type != TCP_V4_FLOW) || + (!fsp->h_u.tcp_ip4_spec.ip4dst) || + (fsp->location > 1)) + return -EINVAL; + + mac->hwlro_ip[fsp->location] = htonl(fsp->h_u.tcp_ip4_spec.ip4dst); + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; + + mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + mtk_hwlro_val_ipaddr(eth, hwlro_idx, mac->hwlro_ip[fsp->location]); + + return 0; +} + +static int mtk_hwlro_del_ipaddr(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int hwlro_idx; + + if (fsp->location > 1) + return -EINVAL; + + mac->hwlro_ip[fsp->location] = 0; + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + fsp->location; + + mac->hwlro_ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + mtk_hwlro_inval_ipaddr(eth, hwlro_idx); + + return 0; +} + +static void mtk_hwlro_netdev_disable(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int i, hwlro_idx; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; 
i++) { + mac->hwlro_ip[i] = 0; + hwlro_idx = (mac->id * MTK_MAX_LRO_IP_CNT) + i; + + mtk_hwlro_inval_ipaddr(eth, hwlro_idx); + } + + mac->hwlro_ip_cnt = 0; +} + +static int mtk_hwlro_get_fdir_entry(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (fsp->location >= ARRAY_SIZE(mac->hwlro_ip)) + return -EINVAL; + + /* only tcp dst ipv4 is meaningful, others are meaningless */ + fsp->flow_type = TCP_V4_FLOW; + fsp->h_u.tcp_ip4_spec.ip4dst = ntohl(mac->hwlro_ip[fsp->location]); + fsp->m_u.tcp_ip4_spec.ip4dst = 0; + + fsp->h_u.tcp_ip4_spec.ip4src = 0; + fsp->m_u.tcp_ip4_spec.ip4src = 0xffffffff; + fsp->h_u.tcp_ip4_spec.psrc = 0; + fsp->m_u.tcp_ip4_spec.psrc = 0xffff; + fsp->h_u.tcp_ip4_spec.pdst = 0; + fsp->m_u.tcp_ip4_spec.pdst = 0xffff; + fsp->h_u.tcp_ip4_spec.tos = 0; + fsp->m_u.tcp_ip4_spec.tos = 0xff; + + return 0; +} + +static int mtk_hwlro_get_fdir_all(struct net_device *dev, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct mtk_mac *mac = netdev_priv(dev); + int cnt = 0; + int i; + + for (i = 0; i < MTK_MAX_LRO_IP_CNT; i++) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + + if (mac->hwlro_ip[i]) { + rule_locs[cnt] = i; + cnt++; + } + } + + cmd->rule_cnt = cnt; + + return 0; +} + +static netdev_features_t mtk_fix_features(struct net_device *dev, + netdev_features_t features) +{ + if (!(features & NETIF_F_LRO)) { + struct mtk_mac *mac = netdev_priv(dev); + int ip_cnt = mtk_hwlro_get_ip_cnt(mac); + + if (ip_cnt) { + netdev_info(dev, "RX flow is programmed, LRO should keep on\n"); + + features |= NETIF_F_LRO; + } + } + + return features; +} + +static int mtk_set_features(struct net_device *dev, netdev_features_t features) +{ + int err = 0; + + if (!((dev->features ^ features) & NETIF_F_LRO)) + return 0; + + if (!(features & NETIF_F_LRO)) + mtk_hwlro_netdev_disable(dev); + + return err; +} + +/* wait for DMA to finish whatever it is doing before we start using it again */ +static int mtk_dma_busy_wait(struct mtk_eth *eth) +{ + unsigned int reg; + int ret; + u32 val; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + reg = eth->soc->reg_map->qdma.glo_cfg; + else + reg = eth->soc->reg_map->pdma.glo_cfg; + + ret = readx_poll_timeout_atomic(__raw_readl, eth->base + reg, val, + !(val & (MTK_RX_DMA_BUSY | MTK_TX_DMA_BUSY)), + 5, MTK_DMA_BUSY_TIMEOUT_US); + if (ret) + dev_err(eth->dev, "DMA init timeout\n"); + + return ret; +} + +static int mtk_dma_init(struct mtk_eth *eth) +{ + int err; + u32 i; + + if (mtk_dma_busy_wait(eth)) + return -EBUSY; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* QDMA needs scratch memory for internal reordering of the + * descriptors + */ + err = mtk_init_fq_dma(eth); + if (err) + return err; + } + + err = mtk_tx_alloc(eth); + if (err) + return err; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_QDMA); + if (err) + return err; + } + + err = mtk_rx_alloc(eth, 0, MTK_RX_FLAGS_NORMAL); + if (err) + return err; + + if (eth->hwlro) { + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) { + err = mtk_rx_alloc(eth, i, MTK_RX_FLAGS_HWLRO); + if (err) + return err; + } + err = mtk_hwlro_rx_init(eth); + if (err) + return err; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + /* Enable random early drop and set drop threshold + * automatically + */ + mtk_w32(eth, FC_THRES_DROP_MODE | FC_THRES_DROP_EN | + FC_THRES_MIN, eth->soc->reg_map->qdma.fc_th); + mtk_w32(eth, 0x0, 
eth->soc->reg_map->qdma.hred); + } + + return 0; +} + +static void mtk_dma_free(struct mtk_eth *eth) +{ + const struct mtk_soc_data *soc = eth->soc; + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) + if (eth->netdev[i]) + netdev_reset_queue(eth->netdev[i]); + if (eth->scratch_ring) { + dma_free_coherent(eth->dma_dev, + MTK_DMA_SIZE * soc->txrx.txd_size, + eth->scratch_ring, eth->phy_scratch_ring); + eth->scratch_ring = NULL; + eth->phy_scratch_ring = 0; + } + mtk_tx_clean(eth); + mtk_rx_clean(eth, ð->rx_ring[0]); + mtk_rx_clean(eth, ð->rx_ring_qdma); + + if (eth->hwlro) { + mtk_hwlro_rx_uninit(eth); + for (i = 1; i < MTK_MAX_RX_RING_NUM; i++) + mtk_rx_clean(eth, ð->rx_ring[i]); + } + + kfree(eth->scratch_head); +} + +static void mtk_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + eth->netdev[mac->id]->stats.tx_errors++; + netif_err(eth, tx_err, dev, + "transmit timed out\n"); + schedule_work(ð->pending_work); +} + +static irqreturn_t mtk_handle_irq_rx(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + eth->rx_events++; + if (likely(napi_schedule_prep(ð->rx_napi))) { + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + __napi_schedule(ð->rx_napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t mtk_handle_irq_tx(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + + eth->tx_events++; + if (likely(napi_schedule_prep(ð->tx_napi))) { + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + __napi_schedule(ð->tx_napi); + } + + return IRQ_HANDLED; +} + +static irqreturn_t mtk_handle_irq(int irq, void *_eth) +{ + struct mtk_eth *eth = _eth; + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + + if (mtk_r32(eth, reg_map->pdma.irq_mask) & + eth->soc->txrx.rx_irq_done_mask) { + if (mtk_r32(eth, reg_map->pdma.irq_status) & + eth->soc->txrx.rx_irq_done_mask) + mtk_handle_irq_rx(irq, _eth); + } + if (mtk_r32(eth, reg_map->tx_irq_mask) & MTK_TX_DONE_INT) { + if (mtk_r32(eth, reg_map->tx_irq_status) & MTK_TX_DONE_INT) + mtk_handle_irq_tx(irq, _eth); + } + + return IRQ_HANDLED; +} + +#ifdef CONFIG_NET_POLL_CONTROLLER +static void mtk_poll_controller(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + mtk_handle_irq_rx(eth->irq[2], dev); + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_enable(eth, eth->soc->txrx.rx_irq_done_mask); +} +#endif + +static int mtk_start_dma(struct mtk_eth *eth) +{ + u32 val, rx_2b_offset = (NET_IP_ALIGN == 2) ? 
MTK_RX_2B_OFFSET : 0; + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + int err; + + err = mtk_dma_init(eth); + if (err) { + mtk_dma_free(eth); + return err; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) { + val = mtk_r32(eth, reg_map->qdma.glo_cfg); + val |= MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_TX_BT_32DWORDS | MTK_NDP_CO_PRO | + MTK_RX_2B_OFFSET | MTK_TX_WB_DDONE; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + val |= MTK_MUTLI_CNT | MTK_RESV_BUF | + MTK_WCOMP_EN | MTK_DMAD_WR_WDONE | + MTK_CHK_DDONE_EN; + else + val |= MTK_RX_BT_32DWORDS; + mtk_w32(eth, val, reg_map->qdma.glo_cfg); + + mtk_w32(eth, + MTK_RX_DMA_EN | rx_2b_offset | + MTK_RX_BT_32DWORDS | MTK_MULTI_EN, + reg_map->pdma.glo_cfg); + } else { + mtk_w32(eth, MTK_TX_WB_DDONE | MTK_TX_DMA_EN | MTK_RX_DMA_EN | + MTK_MULTI_EN | MTK_PDMA_SIZE_8DWORDS, + reg_map->pdma.glo_cfg); + } + + return 0; +} + +static void mtk_gdm_config(struct mtk_eth *eth, u32 config) +{ + int i; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + u32 val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + + /* default setup the forward port to send frame to PDMA */ + val &= ~0xffff; + + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + + val |= config; + + if (!i && eth->netdev[0] && netdev_uses_dsa(eth->netdev[0])) + val |= MTK_GDMA_SPECIAL_TAG; + + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); + } + /* Reset and enable PSE */ + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); + mtk_w32(eth, 0, MTK_RST_GL); +} + +static int mtk_open(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int err; + + err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); + if (err) { + netdev_err(dev, "%s: could not attach PHY: %d\n", __func__, + err); + return err; + } + + /* we run 2 netdevs on the same dma ring so we only bring it up once */ + if (!refcount_read(ð->dma_refcnt)) { + const struct mtk_soc_data *soc = eth->soc; + u32 gdm_config; + int i; + + err = mtk_start_dma(eth); + if (err) { + phylink_disconnect_phy(mac->phylink); + return err; + } + + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) + mtk_ppe_start(eth->ppe[i]); + + gdm_config = soc->offload_version ? 
soc->reg_map->gdma_to_ppe + : MTK_GDMA_TO_PDMA; + mtk_gdm_config(eth, gdm_config); + + napi_enable(ð->tx_napi); + napi_enable(ð->rx_napi); + mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_enable(eth, soc->txrx.rx_irq_done_mask); + refcount_set(ð->dma_refcnt, 1); + } + else + refcount_inc(ð->dma_refcnt); + + phylink_start(mac->phylink); + netif_start_queue(dev); + return 0; +} + +static void mtk_stop_dma(struct mtk_eth *eth, u32 glo_cfg) +{ + u32 val; + int i; + + /* stop the dma engine */ + spin_lock_bh(ð->page_lock); + val = mtk_r32(eth, glo_cfg); + mtk_w32(eth, val & ~(MTK_TX_WB_DDONE | MTK_RX_DMA_EN | MTK_TX_DMA_EN), + glo_cfg); + spin_unlock_bh(ð->page_lock); + + /* wait for dma stop */ + for (i = 0; i < 10; i++) { + val = mtk_r32(eth, glo_cfg); + if (val & (MTK_TX_DMA_BUSY | MTK_RX_DMA_BUSY)) { + msleep(20); + continue; + } + break; + } +} + +static int mtk_stop(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int i; + + phylink_stop(mac->phylink); + + netif_tx_disable(dev); + + phylink_disconnect_phy(mac->phylink); + + /* only shutdown DMA if this is the last user */ + if (!refcount_dec_and_test(ð->dma_refcnt)) + return 0; + + mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); + + mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); + mtk_rx_irq_disable(eth, eth->soc->txrx.rx_irq_done_mask); + napi_disable(ð->tx_napi); + napi_disable(ð->rx_napi); + + cancel_work_sync(ð->rx_dim.work); + cancel_work_sync(ð->tx_dim.work); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_stop_dma(eth, eth->soc->reg_map->qdma.glo_cfg); + mtk_stop_dma(eth, eth->soc->reg_map->pdma.glo_cfg); + + mtk_dma_free(eth); + + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) + mtk_ppe_stop(eth->ppe[i]); + + return 0; +} + +static int mtk_xdp_setup(struct net_device *dev, struct bpf_prog *prog, + struct netlink_ext_ack *extack) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + struct bpf_prog *old_prog; + bool need_update; + + if (eth->hwlro) { + NL_SET_ERR_MSG_MOD(extack, "XDP not supported with HWLRO"); + return -EOPNOTSUPP; + } + + if (dev->mtu > MTK_PP_MAX_BUF_SIZE) { + NL_SET_ERR_MSG_MOD(extack, "MTU too large for XDP"); + return -EOPNOTSUPP; + } + + need_update = !!eth->prog != !!prog; + if (netif_running(dev) && need_update) + mtk_stop(dev); + + old_prog = rcu_replace_pointer(eth->prog, prog, lockdep_rtnl_is_held()); + if (old_prog) + bpf_prog_put(old_prog); + + if (netif_running(dev) && need_update) + return mtk_open(dev); + + return 0; +} + +static int mtk_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + switch (xdp->command) { + case XDP_SETUP_PROG: + return mtk_xdp_setup(dev, xdp->prog, xdp->extack); + default: + return -EINVAL; + } +} + +static void ethsys_reset(struct mtk_eth *eth, u32 reset_bits) +{ + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, + reset_bits); + + usleep_range(1000, 1100); + regmap_update_bits(eth->ethsys, ETHSYS_RSTCTRL, + reset_bits, + ~reset_bits); + mdelay(10); +} + +static void mtk_clk_disable(struct mtk_eth *eth) +{ + int clk; + + for (clk = MTK_CLK_MAX - 1; clk >= 0; clk--) + clk_disable_unprepare(eth->clks[clk]); +} + +static int mtk_clk_enable(struct mtk_eth *eth) +{ + int clk, ret; + + for (clk = 0; clk < MTK_CLK_MAX ; clk++) { + ret = clk_prepare_enable(eth->clks[clk]); + if (ret) + goto err_disable_clks; + } + + return 0; + +err_disable_clks: + while (--clk >= 0) + clk_disable_unprepare(eth->clks[clk]); + + return ret; +} + +static void mtk_dim_rx(struct work_struct *work) +{ + 
struct dim *dim = container_of(work, struct dim, work); + struct mtk_eth *eth = container_of(dim, struct mtk_eth, rx_dim); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + struct dim_cq_moder cur_profile; + u32 val, cur; + + cur_profile = net_dim_get_rx_moderation(eth->rx_dim.mode, + dim->profile_ix); + spin_lock_bh(ð->dim_lock); + + val = mtk_r32(eth, reg_map->pdma.delay_irq); + val &= MTK_PDMA_DELAY_TX_MASK; + val |= MTK_PDMA_DELAY_RX_EN; + + cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK); + val |= cur << MTK_PDMA_DELAY_RX_PTIME_SHIFT; + + cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); + val |= cur << MTK_PDMA_DELAY_RX_PINT_SHIFT; + + mtk_w32(eth, val, reg_map->pdma.delay_irq); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_w32(eth, val, reg_map->qdma.delay_irq); + + spin_unlock_bh(ð->dim_lock); + + dim->state = DIM_START_MEASURE; +} + +static void mtk_dim_tx(struct work_struct *work) +{ + struct dim *dim = container_of(work, struct dim, work); + struct mtk_eth *eth = container_of(dim, struct mtk_eth, tx_dim); + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + struct dim_cq_moder cur_profile; + u32 val, cur; + + cur_profile = net_dim_get_tx_moderation(eth->tx_dim.mode, + dim->profile_ix); + spin_lock_bh(ð->dim_lock); + + val = mtk_r32(eth, reg_map->pdma.delay_irq); + val &= MTK_PDMA_DELAY_RX_MASK; + val |= MTK_PDMA_DELAY_TX_EN; + + cur = min_t(u32, DIV_ROUND_UP(cur_profile.usec, 20), MTK_PDMA_DELAY_PTIME_MASK); + val |= cur << MTK_PDMA_DELAY_TX_PTIME_SHIFT; + + cur = min_t(u32, cur_profile.pkts, MTK_PDMA_DELAY_PINT_MASK); + val |= cur << MTK_PDMA_DELAY_TX_PINT_SHIFT; + + mtk_w32(eth, val, reg_map->pdma.delay_irq); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_QDMA)) + mtk_w32(eth, val, reg_map->qdma.delay_irq); + + spin_unlock_bh(ð->dim_lock); + + dim->state = DIM_START_MEASURE; +} + +static void mtk_set_mcr_max_rx(struct mtk_mac *mac, u32 val) +{ + struct mtk_eth *eth = mac->hw; + u32 mcr_cur, mcr_new; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + return; + + mcr_cur = mtk_r32(mac->hw, MTK_MAC_MCR(mac->id)); + mcr_new = mcr_cur & ~MAC_MCR_MAX_RX_MASK; + + if (val <= 1518) + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1518); + else if (val <= 1536) + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1536); + else if (val <= 1552) + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_1552); + else + mcr_new |= MAC_MCR_MAX_RX(MAC_MCR_MAX_RX_2048); + + if (mcr_new != mcr_cur) + mtk_w32(mac->hw, mcr_new, MTK_MAC_MCR(mac->id)); +} + +static int mtk_hw_init(struct mtk_eth *eth) +{ + u32 dma_mask = ETHSYS_DMA_AG_MAP_PDMA | ETHSYS_DMA_AG_MAP_QDMA | + ETHSYS_DMA_AG_MAP_PPE; + const struct mtk_reg_map *reg_map = eth->soc->reg_map; + int i, val, ret; + + if (test_and_set_bit(MTK_HW_INIT, ð->state)) + return 0; + + pm_runtime_enable(eth->dev); + pm_runtime_get_sync(eth->dev); + + ret = mtk_clk_enable(eth); + if (ret) + goto err_disable_pm; + + if (eth->ethsys) + regmap_update_bits(eth->ethsys, ETHSYS_DMA_AG_MAP, dma_mask, + of_dma_is_coherent(eth->dma_dev->of_node) * dma_mask); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + ret = device_reset(eth->dev); + if (ret) { + dev_err(eth->dev, "MAC reset failed!\n"); + goto err_disable_pm; + } + + /* set interrupt delays based on current Net DIM sample */ + mtk_dim_rx(ð->rx_dim.work); + mtk_dim_tx(ð->tx_dim.work); + + /* disable delay and normal interrupt */ + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + + return 0; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + 
regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, 0); + val = RSTCTRL_PPE0_V2; + } else { + val = RSTCTRL_PPE0; + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_RSTCTRL_PPE1)) + val |= RSTCTRL_PPE1; + + ethsys_reset(eth, RSTCTRL_ETH | RSTCTRL_FE | val); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + regmap_write(eth->ethsys, ETHSYS_FE_RST_CHK_IDLE_EN, + 0x3ffffff); + + /* Set FE to PDMAv2 if necessary */ + val = mtk_r32(eth, MTK_FE_GLO_MISC); + mtk_w32(eth, val | BIT(4), MTK_FE_GLO_MISC); + } + + if (eth->pctl) { + /* Set GE2 driving and slew rate */ + regmap_write(eth->pctl, GPIO_DRV_SEL10, 0xa00); + + /* set GE2 TDSEL */ + regmap_write(eth->pctl, GPIO_OD33_CTRL8, 0x5); + + /* set GE2 TUNE */ + regmap_write(eth->pctl, GPIO_BIAS_CTRL, 0x0); + } + + /* Set linkdown as the default for each GMAC. Its own MCR would be set + * up with the more appropriate value when mtk_mac_config call is being + * invoked. + */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + struct net_device *dev = eth->netdev[i]; + + mtk_w32(eth, MAC_MCR_FORCE_LINK_DOWN, MTK_MAC_MCR(i)); + if (dev) { + struct mtk_mac *mac = netdev_priv(dev); + + mtk_set_mcr_max_rx(mac, dev->mtu + MTK_RX_ETH_HLEN); + } + } + + /* Indicates CDM to parse the MTK special tag from CPU + * which also is working out for untag packets. + */ + val = mtk_r32(eth, MTK_CDMQ_IG_CTRL); + mtk_w32(eth, val | MTK_CDMQ_STAG_EN, MTK_CDMQ_IG_CTRL); + + /* Enable RX VLan Offloading */ + mtk_w32(eth, 1, MTK_CDMP_EG_CTRL); + + /* set interrupt delays based on current Net DIM sample */ + mtk_dim_rx(ð->rx_dim.work); + mtk_dim_tx(ð->tx_dim.work); + + /* disable delay and normal interrupt */ + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); + + /* FE int grouping */ + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->pdma.int_grp); + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->pdma.int_grp + 4); + mtk_w32(eth, MTK_TX_DONE_INT, reg_map->qdma.int_grp); + mtk_w32(eth, eth->soc->txrx.rx_irq_done_mask, reg_map->qdma.int_grp + 4); + mtk_w32(eth, 0x21021000, MTK_FE_INT_GRP); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + /* PSE should not drop port8 and port9 packets from WDMA Tx */ + mtk_w32(eth, 0x00000300, PSE_DROP_CFG); + + /* PSE should drop packets to port 8/9 on WDMA Rx ring full */ + mtk_w32(eth, 0x00000300, PSE_PPE0_DROP); + + /* PSE Free Queue Flow Control */ + mtk_w32(eth, 0x01fa01f4, PSE_FQFC_CFG2); + + /* PSE config input queue threshold */ + mtk_w32(eth, 0x001a000e, PSE_IQ_REV(1)); + mtk_w32(eth, 0x01ff001a, PSE_IQ_REV(2)); + mtk_w32(eth, 0x000e01ff, PSE_IQ_REV(3)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(4)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(5)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(6)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(7)); + mtk_w32(eth, 0x000e000e, PSE_IQ_REV(8)); + + /* PSE config output queue threshold */ + mtk_w32(eth, 0x000f000a, PSE_OQ_TH(1)); + mtk_w32(eth, 0x001a000f, PSE_OQ_TH(2)); + mtk_w32(eth, 0x000f001a, PSE_OQ_TH(3)); + mtk_w32(eth, 0x01ff000f, PSE_OQ_TH(4)); + mtk_w32(eth, 0x000f000f, PSE_OQ_TH(5)); + mtk_w32(eth, 0x0006000f, PSE_OQ_TH(6)); + mtk_w32(eth, 0x00060006, PSE_OQ_TH(7)); + mtk_w32(eth, 0x00060006, PSE_OQ_TH(8)); + + /* GDM and CDM Threshold */ + mtk_w32(eth, 0x00000004, MTK_GDM2_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMW0_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMW1_THRES); + mtk_w32(eth, 0x00000004, MTK_CDME0_THRES); + mtk_w32(eth, 0x00000004, MTK_CDME1_THRES); + mtk_w32(eth, 0x00000004, MTK_CDMM_THRES); + } + + return 0; + +err_disable_pm: + pm_runtime_put_sync(eth->dev); + 
pm_runtime_disable(eth->dev); + + return ret; +} + +static int mtk_hw_deinit(struct mtk_eth *eth) +{ + if (!test_and_clear_bit(MTK_HW_INIT, ð->state)) + return 0; + + mtk_clk_disable(eth); + + pm_runtime_put_sync(eth->dev); + pm_runtime_disable(eth->dev); + + return 0; +} + +static void mtk_uninit(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + phylink_disconnect_phy(mac->phylink); + mtk_tx_irq_disable(eth, ~0); + mtk_rx_irq_disable(eth, ~0); +} + +static int mtk_change_mtu(struct net_device *dev, int new_mtu) +{ + int length = new_mtu + MTK_RX_ETH_HLEN; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + + if (rcu_access_pointer(eth->prog) && + length > MTK_PP_MAX_BUF_SIZE) { + netdev_err(dev, "Invalid MTU for XDP mode\n"); + return -EINVAL; + } + + mtk_set_mcr_max_rx(mac, length); + dev->mtu = new_mtu; + + return 0; +} + +static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) +{ + struct mtk_mac *mac = netdev_priv(dev); + + switch (cmd) { + case SIOCGMIIPHY: + case SIOCGMIIREG: + case SIOCSMIIREG: + return phylink_mii_ioctl(mac->phylink, ifr, cmd); + default: + break; + } + + return -EOPNOTSUPP; +} + +static void mtk_pending_work(struct work_struct *work) +{ + struct mtk_eth *eth = container_of(work, struct mtk_eth, pending_work); + int err, i; + unsigned long restart = 0; + + rtnl_lock(); + + dev_dbg(eth->dev, "[%s][%d] reset\n", __func__, __LINE__); + + while (test_and_set_bit_lock(MTK_RESETTING, ð->state)) + cpu_relax(); + + dev_dbg(eth->dev, "[%s][%d] mtk_stop starts\n", __func__, __LINE__); + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + __set_bit(i, &restart); + } + dev_dbg(eth->dev, "[%s][%d] mtk_stop ends\n", __func__, __LINE__); + + /* restart underlying hardware such as power, clock, pin mux + * and the connected phy + */ + mtk_hw_deinit(eth); + + if (eth->dev->pins) + pinctrl_select_state(eth->dev->pins->p, + eth->dev->pins->default_state); + mtk_hw_init(eth); + + /* restart DMA and enable IRQs */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!test_bit(i, &restart)) + continue; + err = mtk_open(eth->netdev[i]); + if (err) { + netif_alert(eth, ifup, eth->netdev[i], + "Driver up/down cycle failed, closing device.\n"); + dev_close(eth->netdev[i]); + } + } + + dev_dbg(eth->dev, "[%s][%d] reset done\n", __func__, __LINE__); + + clear_bit_unlock(MTK_RESETTING, ð->state); + + rtnl_unlock(); +} + +static int mtk_free_dev(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + free_netdev(eth->netdev[i]); + } + + return 0; +} + +static int mtk_unreg_dev(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + unregister_netdev(eth->netdev[i]); + } + + return 0; +} + +static int mtk_cleanup(struct mtk_eth *eth) +{ + mtk_unreg_dev(eth); + mtk_free_dev(eth); + cancel_work_sync(ð->pending_work); + + return 0; +} + +static int mtk_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct mtk_mac *mac = netdev_priv(ndev); + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + return phylink_ethtool_ksettings_get(mac->phylink, cmd); +} + +static int mtk_set_link_ksettings(struct net_device *ndev, + const struct ethtool_link_ksettings *cmd) +{ + struct mtk_mac *mac = netdev_priv(ndev); + + if 
(unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + return phylink_ethtool_ksettings_set(mac->phylink, cmd); +} + +static void mtk_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct mtk_mac *mac = netdev_priv(dev); + + strscpy(info->driver, mac->hw->dev->driver->name, sizeof(info->driver)); + strscpy(info->bus_info, dev_name(mac->hw->dev), sizeof(info->bus_info)); + info->n_stats = ARRAY_SIZE(mtk_ethtool_stats); +} + +static u32 mtk_get_msglevel(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return mac->hw->msg_enable; +} + +static void mtk_set_msglevel(struct net_device *dev, u32 value) +{ + struct mtk_mac *mac = netdev_priv(dev); + + mac->hw->msg_enable = value; +} + +static int mtk_nway_reset(struct net_device *dev) +{ + struct mtk_mac *mac = netdev_priv(dev); + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return -EBUSY; + + if (!mac->phylink) + return -ENOTSUPP; + + return phylink_ethtool_nway_reset(mac->phylink); +} + +static void mtk_get_strings(struct net_device *dev, u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: { + struct mtk_mac *mac = netdev_priv(dev); + + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) { + memcpy(data, mtk_ethtool_stats[i].str, ETH_GSTRING_LEN); + data += ETH_GSTRING_LEN; + } + if (mtk_page_pool_enabled(mac->hw)) + page_pool_ethtool_stats_get_strings(data); + break; + } + default: + break; + } +} + +static int mtk_get_sset_count(struct net_device *dev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: { + int count = ARRAY_SIZE(mtk_ethtool_stats); + struct mtk_mac *mac = netdev_priv(dev); + + if (mtk_page_pool_enabled(mac->hw)) + count += page_pool_ethtool_stats_get_count(); + return count; + } + default: + return -EOPNOTSUPP; + } +} + +static void mtk_ethtool_pp_stats(struct mtk_eth *eth, u64 *data) +{ + struct page_pool_stats stats = {}; + int i; + + for (i = 0; i < ARRAY_SIZE(eth->rx_ring); i++) { + struct mtk_rx_ring *ring = ð->rx_ring[i]; + + if (!ring->page_pool) + continue; + + page_pool_get_stats(ring->page_pool, &stats); + } + page_pool_ethtool_stats_get(data, &stats); +} + +static void mtk_get_ethtool_stats(struct net_device *dev, + struct ethtool_stats *stats, u64 *data) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_hw_stats *hwstats = mac->hw_stats; + u64 *data_src, *data_dst; + unsigned int start; + int i; + + if (unlikely(test_bit(MTK_RESETTING, &mac->hw->state))) + return; + + if (netif_running(dev) && netif_device_present(dev)) { + if (spin_trylock_bh(&hwstats->stats_lock)) { + mtk_stats_update_mac(mac); + spin_unlock_bh(&hwstats->stats_lock); + } + } + + data_src = (u64 *)hwstats; + + do { + data_dst = data; + start = u64_stats_fetch_begin_irq(&hwstats->syncp); + + for (i = 0; i < ARRAY_SIZE(mtk_ethtool_stats); i++) + *data_dst++ = *(data_src + mtk_ethtool_stats[i].offset); + if (mtk_page_pool_enabled(mac->hw)) + mtk_ethtool_pp_stats(mac->hw, data_dst); + } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); +} + +static int mtk_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + if (dev->hw_features & NETIF_F_LRO) { + cmd->data = MTK_MAX_RX_RING_NUM; + ret = 0; + } + break; + case ETHTOOL_GRXCLSRLCNT: + if (dev->hw_features & NETIF_F_LRO) { + struct mtk_mac *mac = netdev_priv(dev); + + cmd->rule_cnt = mac->hwlro_ip_cnt; + ret = 0; + } + break; + case ETHTOOL_GRXCLSRULE: + if 
(dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_get_fdir_entry(dev, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_get_fdir_all(dev, cmd, + rule_locs); + break; + default: + break; + } + + return ret; +} + +static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_add_ipaddr(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + if (dev->hw_features & NETIF_F_LRO) + ret = mtk_hwlro_del_ipaddr(dev, cmd); + break; + default: + break; + } + + return ret; +} + +static const struct ethtool_ops mtk_ethtool_ops = { + .get_link_ksettings = mtk_get_link_ksettings, + .set_link_ksettings = mtk_set_link_ksettings, + .get_drvinfo = mtk_get_drvinfo, + .get_msglevel = mtk_get_msglevel, + .set_msglevel = mtk_set_msglevel, + .nway_reset = mtk_nway_reset, + .get_link = ethtool_op_get_link, + .get_strings = mtk_get_strings, + .get_sset_count = mtk_get_sset_count, + .get_ethtool_stats = mtk_get_ethtool_stats, + .get_rxnfc = mtk_get_rxnfc, + .set_rxnfc = mtk_set_rxnfc, +}; + +static const struct net_device_ops mtk_netdev_ops = { + .ndo_uninit = mtk_uninit, + .ndo_open = mtk_open, + .ndo_stop = mtk_stop, + .ndo_start_xmit = mtk_start_xmit, + .ndo_set_mac_address = mtk_set_mac_address, + .ndo_validate_addr = eth_validate_addr, + .ndo_eth_ioctl = mtk_do_ioctl, + .ndo_change_mtu = mtk_change_mtu, + .ndo_tx_timeout = mtk_tx_timeout, + .ndo_get_stats64 = mtk_get_stats64, + .ndo_fix_features = mtk_fix_features, + .ndo_set_features = mtk_set_features, +#ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = mtk_poll_controller, +#endif + .ndo_setup_tc = mtk_eth_setup_tc, + .ndo_bpf = mtk_xdp, + .ndo_xdp_xmit = mtk_xdp_xmit, +}; + +static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np) +{ + const __be32 *_id = of_get_property(np, "reg", NULL); + phy_interface_t phy_mode; + struct phylink *phylink; + struct mtk_mac *mac; + int id, err; + + if (!_id) { + dev_err(eth->dev, "missing mac id\n"); + return -EINVAL; + } + + id = be32_to_cpup(_id); + if (id >= MTK_MAC_COUNT) { + dev_err(eth->dev, "%d is not a valid mac id\n", id); + return -EINVAL; + } + + if (eth->netdev[id]) { + dev_err(eth->dev, "duplicate mac id found: %d\n", id); + return -EINVAL; + } + + eth->netdev[id] = alloc_etherdev(sizeof(*mac)); + if (!eth->netdev[id]) { + dev_err(eth->dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + mac = netdev_priv(eth->netdev[id]); + eth->mac[id] = mac; + mac->id = id; + mac->hw = eth; + mac->of_node = np; + + err = of_get_ethdev_address(mac->of_node, eth->netdev[id]); + if (err == -EPROBE_DEFER) + return err; + + if (err) { + /* If the mac address is invalid, use random mac address */ + eth_hw_addr_random(eth->netdev[id]); + dev_err(eth->dev, "generated random MAC address %pM\n", + eth->netdev[id]->dev_addr); + } + + memset(mac->hwlro_ip, 0, sizeof(mac->hwlro_ip)); + mac->hwlro_ip_cnt = 0; + + mac->hw_stats = devm_kzalloc(eth->dev, + sizeof(*mac->hw_stats), + GFP_KERNEL); + if (!mac->hw_stats) { + dev_err(eth->dev, "failed to allocate counter memory\n"); + err = -ENOMEM; + goto free_netdev; + } + spin_lock_init(&mac->hw_stats->stats_lock); + u64_stats_init(&mac->hw_stats->syncp); + mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET; + + /* phylink create */ + err = of_get_phy_mode(np, &phy_mode); + if (err) { + dev_err(eth->dev, "incorrect phy-mode\n"); + goto free_netdev; + } + + /* mac config is not set 
*/ + mac->interface = PHY_INTERFACE_MODE_NA; + mac->speed = SPEED_UNKNOWN; + + mac->phylink_config.dev = ð->netdev[id]->dev; + mac->phylink_config.type = PHYLINK_NETDEV; + /* This driver makes use of state->speed in mac_config */ + mac->phylink_config.legacy_pre_march2020 = true; + mac->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_10 | MAC_100 | MAC_1000 | MAC_2500FD; + + __set_bit(PHY_INTERFACE_MODE_MII, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_GMII, + mac->phylink_config.supported_interfaces); + + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_RGMII)) + phy_interface_set_rgmii(mac->phylink_config.supported_interfaces); + + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_TRGMII) && !mac->id) + __set_bit(PHY_INTERFACE_MODE_TRGMII, + mac->phylink_config.supported_interfaces); + + if (MTK_HAS_CAPS(mac->hw->soc->caps, MTK_SGMII)) { + __set_bit(PHY_INTERFACE_MODE_SGMII, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_1000BASEX, + mac->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_2500BASEX, + mac->phylink_config.supported_interfaces); + } + + phylink = phylink_create(&mac->phylink_config, + of_fwnode_handle(mac->of_node), + phy_mode, &mtk_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + goto free_netdev; + } + + mac->phylink = phylink; + + SET_NETDEV_DEV(eth->netdev[id], eth->dev); + eth->netdev[id]->watchdog_timeo = 5 * HZ; + eth->netdev[id]->netdev_ops = &mtk_netdev_ops; + eth->netdev[id]->base_addr = (unsigned long)eth->base; + + eth->netdev[id]->hw_features = eth->soc->hw_features; + if (eth->hwlro) + eth->netdev[id]->hw_features |= NETIF_F_LRO; + + eth->netdev[id]->vlan_features = eth->soc->hw_features & + ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX); + eth->netdev[id]->features |= eth->soc->hw_features; + eth->netdev[id]->ethtool_ops = &mtk_ethtool_ops; + + eth->netdev[id]->irq = eth->irq[0]; + eth->netdev[id]->dev.of_node = np; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN; + else + eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH_2K - MTK_RX_ETH_HLEN; + + return 0; + +free_netdev: + free_netdev(eth->netdev[id]); + return err; +} + +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev) +{ + struct net_device *dev, *tmp; + LIST_HEAD(dev_list); + int i; + + rtnl_lock(); + + for (i = 0; i < MTK_MAC_COUNT; i++) { + dev = eth->netdev[i]; + + if (!dev || !(dev->flags & IFF_UP)) + continue; + + list_add_tail(&dev->close_list, &dev_list); + } + + dev_close_many(&dev_list, false); + + eth->dma_dev = dma_dev; + + list_for_each_entry_safe(dev, tmp, &dev_list, close_list) { + list_del_init(&dev->close_list); + dev_open(dev, NULL); + } + + rtnl_unlock(); +} + +static int mtk_probe(struct platform_device *pdev) +{ + struct resource *res = NULL; + struct device_node *mac_np; + struct mtk_eth *eth; + int err, i; + + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); + if (!eth) + return -ENOMEM; + + eth->soc = of_device_get_match_data(&pdev->dev); + + eth->dev = &pdev->dev; + eth->dma_dev = &pdev->dev; + eth->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(eth->base)) + return PTR_ERR(eth->base); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) + eth->ip_align = NET_IP_ALIGN; + + spin_lock_init(ð->page_lock); + spin_lock_init(ð->tx_irq_lock); + spin_lock_init(ð->rx_irq_lock); + spin_lock_init(ð->dim_lock); + + eth->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + 
INIT_WORK(ð->rx_dim.work, mtk_dim_rx); + + eth->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + INIT_WORK(ð->tx_dim.work, mtk_dim_tx); + + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + eth->ethsys = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,ethsys"); + if (IS_ERR(eth->ethsys)) { + dev_err(&pdev->dev, "no ethsys regmap found\n"); + return PTR_ERR(eth->ethsys); + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_INFRA)) { + eth->infra = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,infracfg"); + if (IS_ERR(eth->infra)) { + dev_err(&pdev->dev, "no infracfg regmap found\n"); + return PTR_ERR(eth->infra); + } + } + + if (of_dma_is_coherent(pdev->dev.of_node)) { + struct regmap *cci; + + cci = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "cci-control-port"); + /* enable CPU/bus coherency */ + if (!IS_ERR(cci)) + regmap_write(cci, 0, 3); + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SGMII)) { + eth->sgmii = devm_kzalloc(eth->dev, sizeof(*eth->sgmii), + GFP_KERNEL); + if (!eth->sgmii) + return -ENOMEM; + + err = mtk_sgmii_init(eth->sgmii, pdev->dev.of_node, + eth->soc->ana_rgc3); + + if (err) + return err; + } + + if (eth->soc->required_pctl) { + eth->pctl = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, + "mediatek,pctl"); + if (IS_ERR(eth->pctl)) { + dev_err(&pdev->dev, "no pctl regmap found\n"); + return PTR_ERR(eth->pctl); + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + } + + if (eth->soc->offload_version) { + for (i = 0;; i++) { + struct device_node *np; + phys_addr_t wdma_phy; + u32 wdma_base; + + if (i >= ARRAY_SIZE(eth->soc->reg_map->wdma_base)) + break; + + np = of_parse_phandle(pdev->dev.of_node, + "mediatek,wed", i); + if (!np) + break; + + wdma_base = eth->soc->reg_map->wdma_base[i]; + wdma_phy = res ? 
res->start + wdma_base : 0; + mtk_wed_add_hw(np, eth, eth->base + wdma_base, + wdma_phy, i); + } + } + + for (i = 0; i < 3; i++) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT) && i > 0) + eth->irq[i] = eth->irq[0]; + else + eth->irq[i] = platform_get_irq(pdev, i); + if (eth->irq[i] < 0) { + dev_err(&pdev->dev, "no IRQ%d resource found\n", i); + err = -ENXIO; + goto err_wed_exit; + } + } + for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { + eth->clks[i] = devm_clk_get(eth->dev, + mtk_clks_source_name[i]); + if (IS_ERR(eth->clks[i])) { + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_wed_exit; + } + if (eth->soc->required_clks & BIT(i)) { + dev_err(&pdev->dev, "clock %s not found\n", + mtk_clks_source_name[i]); + err = -EINVAL; + goto err_wed_exit; + } + eth->clks[i] = NULL; + } + } + + eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE); + INIT_WORK(ð->pending_work, mtk_pending_work); + + err = mtk_hw_init(eth); + if (err) + goto err_wed_exit; + + eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); + + for_each_child_of_node(pdev->dev.of_node, mac_np) { + if (!of_device_is_compatible(mac_np, + "mediatek,eth-mac")) + continue; + + if (!of_device_is_available(mac_np)) + continue; + + err = mtk_add_mac(eth, mac_np); + if (err) { + of_node_put(mac_np); + goto err_deinit_hw; + } + } + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_SHARED_INT)) { + err = devm_request_irq(eth->dev, eth->irq[0], + mtk_handle_irq, 0, + dev_name(eth->dev), eth); + } else { + err = devm_request_irq(eth->dev, eth->irq[1], + mtk_handle_irq_tx, 0, + dev_name(eth->dev), eth); + if (err) + goto err_free_dev; + + err = devm_request_irq(eth->dev, eth->irq[2], + mtk_handle_irq_rx, 0, + dev_name(eth->dev), eth); + } + if (err) + goto err_free_dev; + + /* No MT7628/88 support yet */ + if (!MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) { + err = mtk_mdio_init(eth); + if (err) + goto err_free_dev; + } + + if (eth->soc->offload_version) { + u32 num_ppe; + + num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 
2 : 1; + num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); + for (i = 0; i < num_ppe; i++) { + u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; + + eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, + eth->soc->offload_version, i); + if (!eth->ppe[i]) { + err = -ENOMEM; + goto err_deinit_ppe; + } + } + + err = mtk_eth_offload_init(eth); + if (err) + goto err_deinit_ppe; + } + + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + err = register_netdev(eth->netdev[i]); + if (err) { + dev_err(eth->dev, "error bringing up device\n"); + goto err_deinit_ppe; + } else + netif_info(eth, probe, eth->netdev[i], + "mediatek frame engine at 0x%08lx, irq %d\n", + eth->netdev[i]->base_addr, eth->irq[0]); + } + + /* we run 2 devices on the same DMA ring so we need a dummy device + * for NAPI to work + */ + init_dummy_netdev(ð->dummy_dev); + netif_napi_add(ð->dummy_dev, ð->tx_napi, mtk_napi_tx); + netif_napi_add(ð->dummy_dev, ð->rx_napi, mtk_napi_rx); + + platform_set_drvdata(pdev, eth); + + return 0; + +err_deinit_ppe: + mtk_ppe_deinit(eth); + mtk_mdio_cleanup(eth); +err_free_dev: + mtk_free_dev(eth); +err_deinit_hw: + mtk_hw_deinit(eth); +err_wed_exit: + mtk_wed_exit(); + + return err; +} + +static int mtk_remove(struct platform_device *pdev) +{ + struct mtk_eth *eth = platform_get_drvdata(pdev); + struct mtk_mac *mac; + int i; + + /* stop all devices to make sure that dma is properly shut down */ + for (i = 0; i < MTK_MAC_COUNT; i++) { + if (!eth->netdev[i]) + continue; + mtk_stop(eth->netdev[i]); + mac = netdev_priv(eth->netdev[i]); + phylink_disconnect_phy(mac->phylink); + } + + mtk_wed_exit(); + mtk_hw_deinit(eth); + + netif_napi_del(ð->tx_napi); + netif_napi_del(ð->rx_napi); + mtk_cleanup(eth); + mtk_mdio_cleanup(eth); + + return 0; +} + +static const struct mtk_soc_data mt2701_data = { + .reg_map = &mtk_reg_map, + .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7623_CLKS_BITMAP, + .required_pctl = true, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +}; + +static const struct mtk_soc_data mt7621_data = { + .reg_map = &mtk_reg_map, + .caps = MT7621_CAPS, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7621_CLKS_BITMAP, + .required_pctl = false, + .offload_version = 2, + .hash_offset = 2, + .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +}; + +static const struct mtk_soc_data mt7622_data = { + .reg_map = &mtk_reg_map, + .ana_rgc3 = 0x2028, + .caps = MT7622_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7622_CLKS_BITMAP, + .required_pctl = false, + .offload_version = 2, + .hash_offset = 2, + .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +}; + +static const struct mtk_soc_data mt7623_data = { + .reg_map = &mtk_reg_map, + .caps = MT7623_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7623_CLKS_BITMAP, + 
.required_pctl = true, + .offload_version = 2, + .hash_offset = 2, + .foe_entry_size = sizeof(struct mtk_foe_entry) - 16, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +}; + +static const struct mtk_soc_data mt7629_data = { + .reg_map = &mtk_reg_map, + .ana_rgc3 = 0x128, + .caps = MT7629_CAPS | MTK_HWLRO, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7629_CLKS_BITMAP, + .required_pctl = false, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +}; + +static const struct mtk_soc_data mt7986_data = { + .reg_map = &mt7986_reg_map, + .ana_rgc3 = 0x128, + .caps = MT7986_CAPS, + .hw_features = MTK_HW_FEATURES, + .required_clks = MT7986_CLKS_BITMAP, + .required_pctl = false, + .hash_offset = 4, + .foe_entry_size = sizeof(struct mtk_foe_entry), + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma_v2), + .rxd_size = sizeof(struct mtk_rx_dma_v2), + .rx_irq_done_mask = MTK_RX_DONE_INT_V2, + .rx_dma_l4_valid = RX_DMA_L4_VALID_V2, + .dma_max_len = MTK_TX_DMA_BUF_LEN_V2, + .dma_len_offset = 8, + }, +}; + +static const struct mtk_soc_data rt5350_data = { + .reg_map = &mt7628_reg_map, + .caps = MT7628_CAPS, + .hw_features = MTK_HW_FEATURES_MT7628, + .required_clks = MT7628_CLKS_BITMAP, + .required_pctl = false, + .txrx = { + .txd_size = sizeof(struct mtk_tx_dma), + .rxd_size = sizeof(struct mtk_rx_dma), + .rx_irq_done_mask = MTK_RX_DONE_INT, + .rx_dma_l4_valid = RX_DMA_L4_VALID_PDMA, + .dma_max_len = MTK_TX_DMA_BUF_LEN, + .dma_len_offset = 16, + }, +}; + +const struct of_device_id of_mtk_match[] = { + { .compatible = "mediatek,mt2701-eth", .data = &mt2701_data}, + { .compatible = "mediatek,mt7621-eth", .data = &mt7621_data}, + { .compatible = "mediatek,mt7622-eth", .data = &mt7622_data}, + { .compatible = "mediatek,mt7623-eth", .data = &mt7623_data}, + { .compatible = "mediatek,mt7629-eth", .data = &mt7629_data}, + { .compatible = "mediatek,mt7986-eth", .data = &mt7986_data}, + { .compatible = "ralink,rt5350-eth", .data = &rt5350_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, of_mtk_match); + +static struct platform_driver mtk_driver = { + .probe = mtk_probe, + .remove = mtk_remove, + .driver = { + .name = "mtk_soc_eth", + .of_match_table = of_mtk_match, + }, +}; + +module_platform_driver(mtk_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); +MODULE_DESCRIPTION("Ethernet driver for MediaTek SoC"); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h new file mode 100644 index 000000000..dafa9a0ba --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -0,0 +1,1246 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * + * Copyright (C) 2009-2016 John Crispin <blogic@openwrt.org> + * Copyright (C) 2009-2016 Felix Fietkau <nbd@openwrt.org> + * Copyright (C) 2013-2016 Michael Lee <igvtee@gmail.com> + */ + +#ifndef MTK_ETH_H +#define MTK_ETH_H + +#include <linux/dma-mapping.h> +#include <linux/netdevice.h> +#include <linux/of_net.h> +#include <linux/u64_stats_sync.h> +#include <linux/refcount.h> +#include <linux/phylink.h> +#include <linux/rhashtable.h> +#include <linux/dim.h> +#include <linux/bitfield.h> +#include <net/page_pool.h> 
+#include <linux/bpf_trace.h> +#include "mtk_ppe.h" + +#define MTK_QDMA_PAGE_SIZE 2048 +#define MTK_MAX_RX_LENGTH 1536 +#define MTK_MAX_RX_LENGTH_2K 2048 +#define MTK_TX_DMA_BUF_LEN 0x3fff +#define MTK_TX_DMA_BUF_LEN_V2 0xffff +#define MTK_DMA_SIZE 512 +#define MTK_MAC_COUNT 2 +#define MTK_RX_ETH_HLEN (ETH_HLEN + ETH_FCS_LEN) +#define MTK_RX_HLEN (NET_SKB_PAD + MTK_RX_ETH_HLEN + NET_IP_ALIGN) +#define MTK_DMA_DUMMY_DESC 0xffffffff +#define MTK_DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | \ + NETIF_MSG_PROBE | \ + NETIF_MSG_LINK | \ + NETIF_MSG_TIMER | \ + NETIF_MSG_IFDOWN | \ + NETIF_MSG_IFUP | \ + NETIF_MSG_RX_ERR | \ + NETIF_MSG_TX_ERR) +#define MTK_HW_FEATURES (NETIF_F_IP_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_SG | NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_IPV6_CSUM |\ + NETIF_F_HW_TC) +#define MTK_HW_FEATURES_MT7628 (NETIF_F_SG | NETIF_F_RXCSUM) +#define NEXT_DESP_IDX(X, Y) (((X) + 1) & ((Y) - 1)) + +#define MTK_PP_HEADROOM XDP_PACKET_HEADROOM +#define MTK_PP_PAD (MTK_PP_HEADROOM + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +#define MTK_PP_MAX_BUF_SIZE (PAGE_SIZE - MTK_PP_PAD) + +#define MTK_QRX_OFFSET 0x10 + +#define MTK_MAX_RX_RING_NUM 4 +#define MTK_HW_LRO_DMA_SIZE 8 + +#define MTK_MAX_LRO_RX_LENGTH (4096 * 3) +#define MTK_MAX_LRO_IP_CNT 2 +#define MTK_HW_LRO_TIMER_UNIT 1 /* 20 us */ +#define MTK_HW_LRO_REFRESH_TIME 50000 /* 1 sec. */ +#define MTK_HW_LRO_AGG_TIME 10 /* 200us */ +#define MTK_HW_LRO_AGE_TIME 50 /* 1ms */ +#define MTK_HW_LRO_MAX_AGG_CNT 64 +#define MTK_HW_LRO_BW_THRE 3000 +#define MTK_HW_LRO_REPLACE_DELTA 1000 +#define MTK_HW_LRO_SDL_REMAIN_ROOM 1522 + +/* Frame Engine Global Reset Register */ +#define MTK_RST_GL 0x04 +#define RST_GL_PSE BIT(0) + +/* Frame Engine Interrupt Status Register */ +#define MTK_INT_STATUS2 0x08 +#define MTK_GDM1_AF BIT(28) +#define MTK_GDM2_AF BIT(29) + +/* PDMA HW LRO Alter Flow Timer Register */ +#define MTK_PDMA_LRO_ALT_REFRESH_TIMER 0x1c + +/* Frame Engine Interrupt Grouping Register */ +#define MTK_FE_INT_GRP 0x20 + +/* CDMP Ingress Control Register */ +#define MTK_CDMQ_IG_CTRL 0x1400 +#define MTK_CDMQ_STAG_EN BIT(0) + +/* CDMP Ingress Control Register */ +#define MTK_CDMP_IG_CTRL 0x400 +#define MTK_CDMP_STAG_EN BIT(0) + +/* CDMP Exgress Control Register */ +#define MTK_CDMP_EG_CTRL 0x404 + +/* GDM Exgress Control Register */ +#define MTK_GDMA_FWD_CFG(x) (0x500 + (x * 0x1000)) +#define MTK_GDMA_SPECIAL_TAG BIT(24) +#define MTK_GDMA_ICS_EN BIT(22) +#define MTK_GDMA_TCS_EN BIT(21) +#define MTK_GDMA_UCS_EN BIT(20) +#define MTK_GDMA_TO_PDMA 0x0 +#define MTK_GDMA_DROP_ALL 0x7777 + +/* Unicast Filter MAC Address Register - Low */ +#define MTK_GDMA_MAC_ADRL(x) (0x508 + (x * 0x1000)) + +/* Unicast Filter MAC Address Register - High */ +#define MTK_GDMA_MAC_ADRH(x) (0x50C + (x * 0x1000)) + +/* FE global misc reg*/ +#define MTK_FE_GLO_MISC 0x124 + +/* PSE Free Queue Flow Control */ +#define PSE_FQFC_CFG1 0x100 +#define PSE_FQFC_CFG2 0x104 +#define PSE_DROP_CFG 0x108 +#define PSE_PPE0_DROP 0x110 + +/* PSE Input Queue Reservation Register*/ +#define PSE_IQ_REV(x) (0x140 + (((x) - 1) << 2)) + +/* PSE Output Queue Threshold Register*/ +#define PSE_OQ_TH(x) (0x160 + (((x) - 1) << 2)) + +/* GDM and CDM Threshold */ +#define MTK_GDM2_THRES 0x1530 +#define MTK_CDMW0_THRES 0x164c +#define MTK_CDMW1_THRES 0x1650 +#define MTK_CDME0_THRES 0x1654 +#define MTK_CDME1_THRES 0x1658 +#define MTK_CDMM_THRES 0x165c + +/* PDMA HW LRO Control Registers */ +#define MTK_PDMA_LRO_CTRL_DW0 0x980 +#define 
MTK_LRO_EN BIT(0) +#define MTK_L3_CKS_UPD_EN BIT(7) +#define MTK_L3_CKS_UPD_EN_V2 BIT(19) +#define MTK_LRO_ALT_PKT_CNT_MODE BIT(21) +#define MTK_LRO_RING_RELINQUISH_REQ (0x7 << 26) +#define MTK_LRO_RING_RELINQUISH_REQ_V2 (0xf << 24) +#define MTK_LRO_RING_RELINQUISH_DONE (0x7 << 29) +#define MTK_LRO_RING_RELINQUISH_DONE_V2 (0xf << 28) + +#define MTK_PDMA_LRO_CTRL_DW1 0x984 +#define MTK_PDMA_LRO_CTRL_DW2 0x988 +#define MTK_PDMA_LRO_CTRL_DW3 0x98c +#define MTK_ADMA_MODE BIT(15) +#define MTK_LRO_MIN_RXD_SDL (MTK_HW_LRO_SDL_REMAIN_ROOM << 16) + +#define MTK_RX_DMA_LRO_EN BIT(8) +#define MTK_MULTI_EN BIT(10) +#define MTK_PDMA_SIZE_8DWORDS (1 << 4) + +/* PDMA Global Configuration Register */ +#define MTK_PDMA_LRO_SDL 0x3000 +#define MTK_RX_CFG_SDL_OFFSET 16 + +/* PDMA Reset Index Register */ +#define MTK_PST_DRX_IDX0 BIT(16) +#define MTK_PST_DRX_IDX_CFG(x) (MTK_PST_DRX_IDX0 << (x)) + +/* PDMA Delay Interrupt Register */ +#define MTK_PDMA_DELAY_RX_MASK GENMASK(15, 0) +#define MTK_PDMA_DELAY_RX_EN BIT(15) +#define MTK_PDMA_DELAY_RX_PINT_SHIFT 8 +#define MTK_PDMA_DELAY_RX_PTIME_SHIFT 0 + +#define MTK_PDMA_DELAY_TX_MASK GENMASK(31, 16) +#define MTK_PDMA_DELAY_TX_EN BIT(31) +#define MTK_PDMA_DELAY_TX_PINT_SHIFT 24 +#define MTK_PDMA_DELAY_TX_PTIME_SHIFT 16 + +#define MTK_PDMA_DELAY_PINT_MASK 0x7f +#define MTK_PDMA_DELAY_PTIME_MASK 0xff + +/* PDMA HW LRO Alter Flow Delta Register */ +#define MTK_PDMA_LRO_ALT_SCORE_DELTA 0xa4c + +/* PDMA HW LRO IP Setting Registers */ +#define MTK_LRO_RX_RING0_DIP_DW0 0xb04 +#define MTK_LRO_DIP_DW0_CFG(x) (MTK_LRO_RX_RING0_DIP_DW0 + (x * 0x40)) +#define MTK_RING_MYIP_VLD BIT(9) + +/* PDMA HW LRO Ring Control Registers */ +#define MTK_LRO_RX_RING0_CTRL_DW1 0xb28 +#define MTK_LRO_RX_RING0_CTRL_DW2 0xb2c +#define MTK_LRO_RX_RING0_CTRL_DW3 0xb30 +#define MTK_LRO_CTRL_DW1_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW1 + (x * 0x40)) +#define MTK_LRO_CTRL_DW2_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW2 + (x * 0x40)) +#define MTK_LRO_CTRL_DW3_CFG(x) (MTK_LRO_RX_RING0_CTRL_DW3 + (x * 0x40)) +#define MTK_RING_AGE_TIME_L ((MTK_HW_LRO_AGE_TIME & 0x3ff) << 22) +#define MTK_RING_AGE_TIME_H ((MTK_HW_LRO_AGE_TIME >> 10) & 0x3f) +#define MTK_RING_AUTO_LERAN_MODE (3 << 6) +#define MTK_RING_VLD BIT(8) +#define MTK_RING_MAX_AGG_TIME ((MTK_HW_LRO_AGG_TIME & 0xffff) << 10) +#define MTK_RING_MAX_AGG_CNT_L ((MTK_HW_LRO_MAX_AGG_CNT & 0x3f) << 26) +#define MTK_RING_MAX_AGG_CNT_H ((MTK_HW_LRO_MAX_AGG_CNT >> 6) & 0x3) + +/* QDMA TX Queue Configuration Registers */ +#define QDMA_RES_THRES 4 + +/* QDMA Global Configuration Register */ +#define MTK_RX_2B_OFFSET BIT(31) +#define MTK_RX_BT_32DWORDS (3 << 11) +#define MTK_NDP_CO_PRO BIT(10) +#define MTK_TX_WB_DDONE BIT(6) +#define MTK_TX_BT_32DWORDS (3 << 4) +#define MTK_RX_DMA_BUSY BIT(3) +#define MTK_TX_DMA_BUSY BIT(1) +#define MTK_RX_DMA_EN BIT(2) +#define MTK_TX_DMA_EN BIT(0) +#define MTK_DMA_BUSY_TIMEOUT_US 1000000 + +/* QDMA V2 Global Configuration Register */ +#define MTK_CHK_DDONE_EN BIT(28) +#define MTK_DMAD_WR_WDONE BIT(26) +#define MTK_WCOMP_EN BIT(24) +#define MTK_RESV_BUF (0x40 << 16) +#define MTK_MUTLI_CNT (0x4 << 12) + +/* QDMA Flow Control Register */ +#define FC_THRES_DROP_MODE BIT(20) +#define FC_THRES_DROP_EN (7 << 16) +#define FC_THRES_MIN 0x4444 + +/* QDMA Interrupt Status Register */ +#define MTK_RX_DONE_DLY BIT(30) +#define MTK_TX_DONE_DLY BIT(28) +#define MTK_RX_DONE_INT3 BIT(19) +#define MTK_RX_DONE_INT2 BIT(18) +#define MTK_RX_DONE_INT1 BIT(17) +#define MTK_RX_DONE_INT0 BIT(16) +#define MTK_TX_DONE_INT3 BIT(3) +#define MTK_TX_DONE_INT2 BIT(2) +#define 
MTK_TX_DONE_INT1 BIT(1) +#define MTK_TX_DONE_INT0 BIT(0) +#define MTK_RX_DONE_INT MTK_RX_DONE_DLY +#define MTK_TX_DONE_INT MTK_TX_DONE_DLY + +#define MTK_RX_DONE_INT_V2 BIT(14) + +/* QDMA Interrupt grouping registers */ +#define MTK_RLS_DONE_INT BIT(0) + +#define MTK_STAT_OFFSET 0x40 + +/* QDMA TX NUM */ +#define MTK_QDMA_TX_NUM 16 +#define MTK_QDMA_TX_MASK (MTK_QDMA_TX_NUM - 1) +#define QID_BITS_V2(x) (((x) & 0x3f) << 16) +#define MTK_QDMA_GMAC2_QID 8 + +#define MTK_TX_DMA_BUF_SHIFT 8 + +/* QDMA V2 descriptor txd6 */ +#define TX_DMA_INS_VLAN_V2 BIT(16) +/* QDMA V2 descriptor txd5 */ +#define TX_DMA_CHKSUM_V2 (0x7 << 28) +#define TX_DMA_TSO_V2 BIT(31) + +/* QDMA V2 descriptor txd4 */ +#define TX_DMA_FPORT_SHIFT_V2 8 +#define TX_DMA_FPORT_MASK_V2 0xf +#define TX_DMA_SWC_V2 BIT(30) + +/* QDMA descriptor txd4 */ +#define TX_DMA_CHKSUM (0x7 << 29) +#define TX_DMA_TSO BIT(28) +#define TX_DMA_FPORT_SHIFT 25 +#define TX_DMA_FPORT_MASK 0x7 +#define TX_DMA_INS_VLAN BIT(16) + +/* QDMA descriptor txd3 */ +#define TX_DMA_OWNER_CPU BIT(31) +#define TX_DMA_LS0 BIT(30) +#define TX_DMA_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) +#define TX_DMA_PLEN1(x) ((x) & eth->soc->txrx.dma_max_len) +#define TX_DMA_SWC BIT(14) + +/* PDMA on MT7628 */ +#define TX_DMA_DONE BIT(31) +#define TX_DMA_LS1 BIT(14) +#define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) + +/* QDMA descriptor rxd2 */ +#define RX_DMA_DONE BIT(31) +#define RX_DMA_LSO BIT(30) +#define RX_DMA_PREP_PLEN0(x) (((x) & eth->soc->txrx.dma_max_len) << eth->soc->txrx.dma_len_offset) +#define RX_DMA_GET_PLEN0(x) (((x) >> eth->soc->txrx.dma_len_offset) & eth->soc->txrx.dma_max_len) +#define RX_DMA_VTAG BIT(15) + +/* QDMA descriptor rxd3 */ +#define RX_DMA_VID(x) ((x) & VLAN_VID_MASK) +#define RX_DMA_TCI(x) ((x) & (VLAN_PRIO_MASK | VLAN_VID_MASK)) +#define RX_DMA_VPID(x) (((x) >> 16) & 0xffff) + +/* QDMA descriptor rxd4 */ +#define MTK_RXD4_FOE_ENTRY GENMASK(13, 0) +#define MTK_RXD4_PPE_CPU_REASON GENMASK(18, 14) +#define MTK_RXD4_SRC_PORT GENMASK(21, 19) +#define MTK_RXD4_ALG GENMASK(31, 22) + +/* QDMA descriptor rxd4 */ +#define RX_DMA_L4_VALID BIT(24) +#define RX_DMA_L4_VALID_PDMA BIT(30) /* when PDMA is used */ +#define RX_DMA_SPECIAL_TAG BIT(22) + +/* PDMA descriptor rxd5 */ +#define MTK_RXD5_FOE_ENTRY GENMASK(14, 0) +#define MTK_RXD5_PPE_CPU_REASON GENMASK(22, 18) +#define MTK_RXD5_SRC_PORT GENMASK(29, 26) + +#define RX_DMA_GET_SPORT(x) (((x) >> 19) & 0x7) +#define RX_DMA_GET_SPORT_V2(x) (((x) >> 26) & 0xf) + +/* PDMA V2 descriptor rxd3 */ +#define RX_DMA_VTAG_V2 BIT(0) +#define RX_DMA_L4_VALID_V2 BIT(2) + +/* PHY Indirect Access Control registers */ +#define MTK_PHY_IAC 0x10004 +#define PHY_IAC_ACCESS BIT(31) +#define PHY_IAC_REG_MASK GENMASK(29, 25) +#define PHY_IAC_REG(x) FIELD_PREP(PHY_IAC_REG_MASK, (x)) +#define PHY_IAC_ADDR_MASK GENMASK(24, 20) +#define PHY_IAC_ADDR(x) FIELD_PREP(PHY_IAC_ADDR_MASK, (x)) +#define PHY_IAC_CMD_MASK GENMASK(19, 18) +#define PHY_IAC_CMD_C45_ADDR FIELD_PREP(PHY_IAC_CMD_MASK, 0) +#define PHY_IAC_CMD_WRITE FIELD_PREP(PHY_IAC_CMD_MASK, 1) +#define PHY_IAC_CMD_C22_READ FIELD_PREP(PHY_IAC_CMD_MASK, 2) +#define PHY_IAC_CMD_C45_READ FIELD_PREP(PHY_IAC_CMD_MASK, 3) +#define PHY_IAC_START_MASK GENMASK(17, 16) +#define PHY_IAC_START_C45 FIELD_PREP(PHY_IAC_START_MASK, 0) +#define PHY_IAC_START_C22 FIELD_PREP(PHY_IAC_START_MASK, 1) +#define PHY_IAC_DATA_MASK GENMASK(15, 0) +#define PHY_IAC_DATA(x) FIELD_PREP(PHY_IAC_DATA_MASK, (x)) +#define PHY_IAC_TIMEOUT HZ + +#define MTK_MAC_MISC 0x1000c +#define 
MTK_MUX_TO_ESW BIT(0) + +/* Mac control registers */ +#define MTK_MAC_MCR(x) (0x10100 + (x * 0x100)) +#define MAC_MCR_MAX_RX_MASK GENMASK(25, 24) +#define MAC_MCR_MAX_RX(_x) (MAC_MCR_MAX_RX_MASK & ((_x) << 24)) +#define MAC_MCR_MAX_RX_1518 0x0 +#define MAC_MCR_MAX_RX_1536 0x1 +#define MAC_MCR_MAX_RX_1552 0x2 +#define MAC_MCR_MAX_RX_2048 0x3 +#define MAC_MCR_IPG_CFG (BIT(18) | BIT(16)) +#define MAC_MCR_FORCE_MODE BIT(15) +#define MAC_MCR_TX_EN BIT(14) +#define MAC_MCR_RX_EN BIT(13) +#define MAC_MCR_RX_FIFO_CLR_DIS BIT(12) +#define MAC_MCR_BACKOFF_EN BIT(9) +#define MAC_MCR_BACKPR_EN BIT(8) +#define MAC_MCR_FORCE_RX_FC BIT(5) +#define MAC_MCR_FORCE_TX_FC BIT(4) +#define MAC_MCR_SPEED_1000 BIT(3) +#define MAC_MCR_SPEED_100 BIT(2) +#define MAC_MCR_FORCE_DPX BIT(1) +#define MAC_MCR_FORCE_LINK BIT(0) +#define MAC_MCR_FORCE_LINK_DOWN (MAC_MCR_FORCE_MODE) + +/* Mac status registers */ +#define MTK_MAC_MSR(x) (0x10108 + (x * 0x100)) +#define MAC_MSR_EEE1G BIT(7) +#define MAC_MSR_EEE100M BIT(6) +#define MAC_MSR_RX_FC BIT(5) +#define MAC_MSR_TX_FC BIT(4) +#define MAC_MSR_SPEED_1000 BIT(3) +#define MAC_MSR_SPEED_100 BIT(2) +#define MAC_MSR_SPEED_MASK (MAC_MSR_SPEED_1000 | MAC_MSR_SPEED_100) +#define MAC_MSR_DPX BIT(1) +#define MAC_MSR_LINK BIT(0) + +/* TRGMII RXC control register */ +#define TRGMII_RCK_CTRL 0x10300 +#define DQSI0(x) ((x << 0) & GENMASK(6, 0)) +#define DQSI1(x) ((x << 8) & GENMASK(14, 8)) +#define RXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16)) +#define RXC_RST BIT(31) +#define RXC_DQSISEL BIT(30) +#define RCK_CTRL_RGMII_1000 (RXC_DQSISEL | RXCTL_DMWTLAT(2) | DQSI1(16)) +#define RCK_CTRL_RGMII_10_100 RXCTL_DMWTLAT(2) + +#define NUM_TRGMII_CTRL 5 + +/* TRGMII RXC control register */ +#define TRGMII_TCK_CTRL 0x10340 +#define TXCTL_DMWTLAT(x) ((x << 16) & GENMASK(18, 16)) +#define TXC_INV BIT(30) +#define TCK_CTRL_RGMII_1000 TXCTL_DMWTLAT(2) +#define TCK_CTRL_RGMII_10_100 (TXC_INV | TXCTL_DMWTLAT(2)) + +/* TRGMII TX Drive Strength */ +#define TRGMII_TD_ODT(i) (0x10354 + 8 * (i)) +#define TD_DM_DRVP(x) ((x) & 0xf) +#define TD_DM_DRVN(x) (((x) & 0xf) << 4) + +/* TRGMII Interface mode register */ +#define INTF_MODE 0x10390 +#define TRGMII_INTF_DIS BIT(0) +#define TRGMII_MODE BIT(1) +#define TRGMII_CENTRAL_ALIGNED BIT(2) +#define INTF_MODE_RGMII_1000 (TRGMII_MODE | TRGMII_CENTRAL_ALIGNED) +#define INTF_MODE_RGMII_10_100 0 + +/* GPIO port control registers for GMAC 2*/ +#define GPIO_OD33_CTRL8 0x4c0 +#define GPIO_BIAS_CTRL 0xed0 +#define GPIO_DRV_SEL10 0xf00 + +/* ethernet subsystem chip id register */ +#define ETHSYS_CHIPID0_3 0x0 +#define ETHSYS_CHIPID4_7 0x4 +#define MT7623_ETH 7623 +#define MT7622_ETH 7622 +#define MT7621_ETH 7621 + +/* ethernet system control register */ +#define ETHSYS_SYSCFG 0x10 +#define SYSCFG_DRAM_TYPE_DDR2 BIT(4) + +/* ethernet subsystem config register */ +#define ETHSYS_SYSCFG0 0x14 +#define SYSCFG0_GE_MASK 0x3 +#define SYSCFG0_GE_MODE(x, y) (x << (12 + (y * 2))) +#define SYSCFG0_SGMII_MASK GENMASK(9, 8) +#define SYSCFG0_SGMII_GMAC1 ((2 << 8) & SYSCFG0_SGMII_MASK) +#define SYSCFG0_SGMII_GMAC2 ((3 << 8) & SYSCFG0_SGMII_MASK) +#define SYSCFG0_SGMII_GMAC1_V2 BIT(9) +#define SYSCFG0_SGMII_GMAC2_V2 BIT(8) + + +/* ethernet subsystem clock register */ +#define ETHSYS_CLKCFG0 0x2c +#define ETHSYS_TRGMII_CLK_SEL362_5 BIT(11) +#define ETHSYS_TRGMII_MT7621_MASK (BIT(5) | BIT(6)) +#define ETHSYS_TRGMII_MT7621_APLL BIT(6) +#define ETHSYS_TRGMII_MT7621_DDR_PLL BIT(5) + +/* ethernet reset control register */ +#define ETHSYS_RSTCTRL 0x34 +#define RSTCTRL_FE BIT(6) +#define 
RSTCTRL_PPE0 BIT(31) +#define RSTCTRL_PPE0_V2 BIT(30) +#define RSTCTRL_PPE1 BIT(31) +#define RSTCTRL_ETH BIT(23) + +/* ethernet reset check idle register */ +#define ETHSYS_FE_RST_CHK_IDLE_EN 0x28 + +/* ethernet dma channel agent map */ +#define ETHSYS_DMA_AG_MAP 0x408 +#define ETHSYS_DMA_AG_MAP_PDMA BIT(0) +#define ETHSYS_DMA_AG_MAP_QDMA BIT(1) +#define ETHSYS_DMA_AG_MAP_PPE BIT(2) + +/* SGMII subsystem config registers */ +/* Register to auto-negotiation restart */ +#define SGMSYS_PCS_CONTROL_1 0x0 +#define SGMII_AN_RESTART BIT(9) +#define SGMII_ISOLATE BIT(10) +#define SGMII_AN_ENABLE BIT(12) +#define SGMII_LINK_STATYS BIT(18) +#define SGMII_AN_ABILITY BIT(19) +#define SGMII_AN_COMPLETE BIT(21) +#define SGMII_PCS_FAULT BIT(23) +#define SGMII_AN_EXPANSION_CLR BIT(30) + +/* Register to programmable link timer, the unit in 2 * 8ns */ +#define SGMSYS_PCS_LINK_TIMER 0x18 +#define SGMII_LINK_TIMER_DEFAULT (0x186a0 & GENMASK(19, 0)) + +/* Register to control remote fault */ +#define SGMSYS_SGMII_MODE 0x20 +#define SGMII_IF_MODE_BIT0 BIT(0) +#define SGMII_SPEED_DUPLEX_AN BIT(1) +#define SGMII_SPEED_MASK GENMASK(3, 2) +#define SGMII_SPEED_10 FIELD_PREP(SGMII_SPEED_MASK, 0) +#define SGMII_SPEED_100 FIELD_PREP(SGMII_SPEED_MASK, 1) +#define SGMII_SPEED_1000 FIELD_PREP(SGMII_SPEED_MASK, 2) +#define SGMII_DUPLEX_FULL BIT(4) +#define SGMII_IF_MODE_BIT5 BIT(5) +#define SGMII_REMOTE_FAULT_DIS BIT(8) +#define SGMII_CODE_SYNC_SET_VAL BIT(9) +#define SGMII_CODE_SYNC_SET_EN BIT(10) +#define SGMII_SEND_AN_ERROR_EN BIT(11) +#define SGMII_IF_MODE_MASK GENMASK(5, 1) + +/* Register to set SGMII speed, ANA RG_ Control Signals III*/ +#define SGMSYS_ANA_RG_CS3 0x2028 +#define RG_PHY_SPEED_MASK (BIT(2) | BIT(3)) +#define RG_PHY_SPEED_1_25G 0x0 +#define RG_PHY_SPEED_3_125G BIT(2) + +/* Register to power up QPHY */ +#define SGMSYS_QPHY_PWR_STATE_CTRL 0xe8 +#define SGMII_PHYA_PWD BIT(4) + +/* Infrasys subsystem config registers */ +#define INFRA_MISC2 0x70c +#define CO_QPHY_SEL BIT(0) +#define GEPHY_MAC_SEL BIT(1) + +/* MT7628/88 specific stuff */ +#define MT7628_PDMA_OFFSET 0x0800 +#define MT7628_SDM_OFFSET 0x0c00 + +#define MT7628_TX_BASE_PTR0 (MT7628_PDMA_OFFSET + 0x00) +#define MT7628_TX_MAX_CNT0 (MT7628_PDMA_OFFSET + 0x04) +#define MT7628_TX_CTX_IDX0 (MT7628_PDMA_OFFSET + 0x08) +#define MT7628_TX_DTX_IDX0 (MT7628_PDMA_OFFSET + 0x0c) +#define MT7628_PST_DTX_IDX0 BIT(0) + +#define MT7628_SDM_MAC_ADRL (MT7628_SDM_OFFSET + 0x0c) +#define MT7628_SDM_MAC_ADRH (MT7628_SDM_OFFSET + 0x10) + +/* Counter / stat register */ +#define MT7628_SDM_TPCNT (MT7628_SDM_OFFSET + 0x100) +#define MT7628_SDM_TBCNT (MT7628_SDM_OFFSET + 0x104) +#define MT7628_SDM_RPCNT (MT7628_SDM_OFFSET + 0x108) +#define MT7628_SDM_RBCNT (MT7628_SDM_OFFSET + 0x10c) +#define MT7628_SDM_CS_ERR (MT7628_SDM_OFFSET + 0x110) + +struct mtk_rx_dma { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; +} __packed __aligned(4); + +struct mtk_rx_dma_v2 { + unsigned int rxd1; + unsigned int rxd2; + unsigned int rxd3; + unsigned int rxd4; + unsigned int rxd5; + unsigned int rxd6; + unsigned int rxd7; + unsigned int rxd8; +} __packed __aligned(4); + +struct mtk_tx_dma { + unsigned int txd1; + unsigned int txd2; + unsigned int txd3; + unsigned int txd4; +} __packed __aligned(4); + +struct mtk_tx_dma_v2 { + unsigned int txd1; + unsigned int txd2; + unsigned int txd3; + unsigned int txd4; + unsigned int txd5; + unsigned int txd6; + unsigned int txd7; + unsigned int txd8; +} __packed __aligned(4); + +struct mtk_eth; +struct mtk_mac; + 
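/* [Editor's note -- illustrative sketch, not part of the driver or of the
 * patch above.]  The v1 and v2 descriptor structs just shown differ only in
 * width; what actually changes per SoC is how buffer lengths are packed into
 * txd3/rxd2.  The TX_DMA_PLEN0()/RX_DMA_GET_PLEN0() macros earlier in this
 * header do that packing through eth->soc->txrx.dma_max_len and
 * .dma_len_offset (0x3fff / 16 on the older SoCs, 0xffff / 8 on MT7986, per
 * the mtk_soc_data tables in mtk_eth_soc.c).  The standalone program below
 * mimics that arithmetic; the demo_* struct and function names are invented
 * for the example only and do not exist in the driver.
 */
#include <stdint.h>
#include <stdio.h>

struct demo_txrx_cfg {            /* mirrors the mtk_soc_data::txrx fields used here */
	uint32_t dma_max_len;     /* max buffer length of one segment */
	uint32_t dma_len_offset;  /* bit offset of PLEN0 inside txd3/rxd2 */
};

/* Pack segment 0's length the way TX_DMA_PLEN0() does. */
static uint32_t demo_tx_plen0(const struct demo_txrx_cfg *c, uint32_t len)
{
	return (len & c->dma_max_len) << c->dma_len_offset;
}

/* Recover segment 0's length the way RX_DMA_GET_PLEN0() does. */
static uint32_t demo_rx_get_plen0(const struct demo_txrx_cfg *c, uint32_t rxd2)
{
	return (rxd2 >> c->dma_len_offset) & c->dma_max_len;
}

int main(void)
{
	const struct demo_txrx_cfg v1 = { .dma_max_len = 0x3fff, .dma_len_offset = 16 };
	const struct demo_txrx_cfg v2 = { .dma_max_len = 0xffff, .dma_len_offset = 8 };
	uint32_t len = 1536;

	printf("v1 txd3 PLEN0 field: 0x%08x\n", demo_tx_plen0(&v1, len));
	printf("v2 txd3 PLEN0 field: 0x%08x\n", demo_tx_plen0(&v2, len));
	printf("v1 round trip length: %u\n",
	       demo_rx_get_plen0(&v1, demo_tx_plen0(&v1, len)));
	return 0;
}
/* End of editor's illustrative sketch; the header resumes below. */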
+struct mtk_xdp_stats { + u64 rx_xdp_redirect; + u64 rx_xdp_pass; + u64 rx_xdp_drop; + u64 rx_xdp_tx; + u64 rx_xdp_tx_errors; + u64 tx_xdp_xmit; + u64 tx_xdp_xmit_errors; +}; + +/* struct mtk_hw_stats - the structure that holds the traffic statistics. + * @stats_lock: make sure that stats operations are atomic + * @reg_offset: the status register offset of the SoC + * @syncp: the refcount + * + * All of the supported SoCs have hardware counters for traffic statistics. + * Whenever the status IRQ triggers we can read the latest stats from these + * counters and store them in this struct. + */ +struct mtk_hw_stats { + u64 tx_bytes; + u64 tx_packets; + u64 tx_skip; + u64 tx_collisions; + u64 rx_bytes; + u64 rx_packets; + u64 rx_overflow; + u64 rx_fcs_errors; + u64 rx_short_errors; + u64 rx_long_errors; + u64 rx_checksum_errors; + u64 rx_flow_control_packets; + + struct mtk_xdp_stats xdp_stats; + + spinlock_t stats_lock; + u32 reg_offset; + struct u64_stats_sync syncp; +}; + +enum mtk_tx_flags { + /* PDMA descriptor can point at 1-2 segments. This enum allows us to + * track how memory was allocated so that it can be freed properly. + */ + MTK_TX_FLAGS_SINGLE0 = 0x01, + MTK_TX_FLAGS_PAGE0 = 0x02, + + /* MTK_TX_FLAGS_FPORTx allows tracking which port the transmitted + * SKB out instead of looking up through hardware TX descriptor. + */ + MTK_TX_FLAGS_FPORT0 = 0x04, + MTK_TX_FLAGS_FPORT1 = 0x08, +}; + +/* This enum allows us to identify how the clock is defined on the array of the + * clock in the order + */ +enum mtk_clks_map { + MTK_CLK_ETHIF, + MTK_CLK_SGMIITOP, + MTK_CLK_ESW, + MTK_CLK_GP0, + MTK_CLK_GP1, + MTK_CLK_GP2, + MTK_CLK_FE, + MTK_CLK_TRGPLL, + MTK_CLK_SGMII_TX_250M, + MTK_CLK_SGMII_RX_250M, + MTK_CLK_SGMII_CDR_REF, + MTK_CLK_SGMII_CDR_FB, + MTK_CLK_SGMII2_TX_250M, + MTK_CLK_SGMII2_RX_250M, + MTK_CLK_SGMII2_CDR_REF, + MTK_CLK_SGMII2_CDR_FB, + MTK_CLK_SGMII_CK, + MTK_CLK_ETH2PLL, + MTK_CLK_WOCPU0, + MTK_CLK_WOCPU1, + MTK_CLK_NETSYS0, + MTK_CLK_NETSYS1, + MTK_CLK_MAX +}; + +#define MT7623_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP1) | BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_TRGPLL)) +#define MT7622_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL)) +#define MT7621_CLKS_BITMAP (0) +#define MT7628_CLKS_BITMAP (0) +#define MT7629_CLKS_BITMAP (BIT(MTK_CLK_ETHIF) | BIT(MTK_CLK_ESW) | \ + BIT(MTK_CLK_GP0) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_GP2) | BIT(MTK_CLK_FE) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII2_TX_250M) | \ + BIT(MTK_CLK_SGMII2_RX_250M) | \ + BIT(MTK_CLK_SGMII2_CDR_REF) | \ + BIT(MTK_CLK_SGMII2_CDR_FB) | \ + BIT(MTK_CLK_SGMII_CK) | \ + BIT(MTK_CLK_ETH2PLL) | BIT(MTK_CLK_SGMIITOP)) +#define MT7986_CLKS_BITMAP (BIT(MTK_CLK_FE) | BIT(MTK_CLK_GP2) | BIT(MTK_CLK_GP1) | \ + BIT(MTK_CLK_WOCPU1) | BIT(MTK_CLK_WOCPU0) | \ + BIT(MTK_CLK_SGMII_TX_250M) | \ + BIT(MTK_CLK_SGMII_RX_250M) | \ + BIT(MTK_CLK_SGMII_CDR_REF) | \ + BIT(MTK_CLK_SGMII_CDR_FB) | \ + BIT(MTK_CLK_SGMII2_TX_250M) | \ + BIT(MTK_CLK_SGMII2_RX_250M) | \ + BIT(MTK_CLK_SGMII2_CDR_REF) | \ + BIT(MTK_CLK_SGMII2_CDR_FB)) + +enum mtk_dev_state { + MTK_HW_INIT, + MTK_RESETTING +}; + +enum mtk_tx_buf_type { + MTK_TYPE_SKB, + MTK_TYPE_XDP_TX, + 
MTK_TYPE_XDP_NDO, +}; + +/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at + * by the TX descriptor s + * @skb: The SKB pointer of the packet being sent + * @dma_addr0: The base addr of the first segment + * @dma_len0: The length of the first segment + * @dma_addr1: The base addr of the second segment + * @dma_len1: The length of the second segment + */ +struct mtk_tx_buf { + enum mtk_tx_buf_type type; + void *data; + + u32 flags; + DEFINE_DMA_UNMAP_ADDR(dma_addr0); + DEFINE_DMA_UNMAP_LEN(dma_len0); + DEFINE_DMA_UNMAP_ADDR(dma_addr1); + DEFINE_DMA_UNMAP_LEN(dma_len1); +}; + +/* struct mtk_tx_ring - This struct holds info describing a TX ring + * @dma: The descriptor ring + * @buf: The memory pointed at by the ring + * @phys: The physical addr of tx_buf + * @next_free: Pointer to the next free descriptor + * @last_free: Pointer to the last free descriptor + * @last_free_ptr: Hardware pointer value of the last free descriptor + * @thresh: The threshold of minimum amount of free descriptors + * @free_count: QDMA uses a linked list. Track how many free descriptors + * are present + */ +struct mtk_tx_ring { + void *dma; + struct mtk_tx_buf *buf; + dma_addr_t phys; + struct mtk_tx_dma *next_free; + struct mtk_tx_dma *last_free; + u32 last_free_ptr; + u16 thresh; + atomic_t free_count; + int dma_size; + struct mtk_tx_dma *dma_pdma; /* For MT7628/88 PDMA handling */ + dma_addr_t phys_pdma; + int cpu_idx; +}; + +/* PDMA rx ring mode */ +enum mtk_rx_flags { + MTK_RX_FLAGS_NORMAL = 0, + MTK_RX_FLAGS_HWLRO, + MTK_RX_FLAGS_QDMA, +}; + +/* struct mtk_rx_ring - This struct holds info describing a RX ring + * @dma: The descriptor ring + * @data: The memory pointed at by the ring + * @phys: The physical addr of rx_buf + * @frag_size: How big can each fragment be + * @buf_size: The size of each packet buffer + * @calc_idx: The current head of ring + */ +struct mtk_rx_ring { + void *dma; + u8 **data; + dma_addr_t phys; + u16 frag_size; + u16 buf_size; + u16 dma_size; + bool calc_idx_update; + u16 calc_idx; + u32 crx_idx_reg; + /* page_pool */ + struct page_pool *page_pool; + struct xdp_rxq_info xdp_q; +}; + +enum mkt_eth_capabilities { + MTK_RGMII_BIT = 0, + MTK_TRGMII_BIT, + MTK_SGMII_BIT, + MTK_ESW_BIT, + MTK_GEPHY_BIT, + MTK_MUX_BIT, + MTK_INFRA_BIT, + MTK_SHARED_SGMII_BIT, + MTK_HWLRO_BIT, + MTK_SHARED_INT_BIT, + MTK_TRGMII_MT7621_CLK_BIT, + MTK_QDMA_BIT, + MTK_NETSYS_V2_BIT, + MTK_SOC_MT7628_BIT, + MTK_RSTCTRL_PPE1_BIT, + + /* MUX BITS*/ + MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT, + MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT, + MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT, + MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT, + MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT, + + /* PATH BITS */ + MTK_ETH_PATH_GMAC1_RGMII_BIT, + MTK_ETH_PATH_GMAC1_TRGMII_BIT, + MTK_ETH_PATH_GMAC1_SGMII_BIT, + MTK_ETH_PATH_GMAC2_RGMII_BIT, + MTK_ETH_PATH_GMAC2_SGMII_BIT, + MTK_ETH_PATH_GMAC2_GEPHY_BIT, + MTK_ETH_PATH_GDM1_ESW_BIT, +}; + +/* Supported hardware group on SoCs */ +#define MTK_RGMII BIT(MTK_RGMII_BIT) +#define MTK_TRGMII BIT(MTK_TRGMII_BIT) +#define MTK_SGMII BIT(MTK_SGMII_BIT) +#define MTK_ESW BIT(MTK_ESW_BIT) +#define MTK_GEPHY BIT(MTK_GEPHY_BIT) +#define MTK_MUX BIT(MTK_MUX_BIT) +#define MTK_INFRA BIT(MTK_INFRA_BIT) +#define MTK_SHARED_SGMII BIT(MTK_SHARED_SGMII_BIT) +#define MTK_HWLRO BIT(MTK_HWLRO_BIT) +#define MTK_SHARED_INT BIT(MTK_SHARED_INT_BIT) +#define MTK_TRGMII_MT7621_CLK BIT(MTK_TRGMII_MT7621_CLK_BIT) +#define MTK_QDMA BIT(MTK_QDMA_BIT) +#define MTK_NETSYS_V2 BIT(MTK_NETSYS_V2_BIT) +#define MTK_SOC_MT7628 
BIT(MTK_SOC_MT7628_BIT) +#define MTK_RSTCTRL_PPE1 BIT(MTK_RSTCTRL_PPE1_BIT) + +#define MTK_ETH_MUX_GDM1_TO_GMAC1_ESW \ + BIT(MTK_ETH_MUX_GDM1_TO_GMAC1_ESW_BIT) +#define MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY \ + BIT(MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY_BIT) +#define MTK_ETH_MUX_U3_GMAC2_TO_QPHY \ + BIT(MTK_ETH_MUX_U3_GMAC2_TO_QPHY_BIT) +#define MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \ + BIT(MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII_BIT) +#define MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII \ + BIT(MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII_BIT) + +/* Supported path present on SoCs */ +#define MTK_ETH_PATH_GMAC1_RGMII BIT(MTK_ETH_PATH_GMAC1_RGMII_BIT) +#define MTK_ETH_PATH_GMAC1_TRGMII BIT(MTK_ETH_PATH_GMAC1_TRGMII_BIT) +#define MTK_ETH_PATH_GMAC1_SGMII BIT(MTK_ETH_PATH_GMAC1_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_RGMII BIT(MTK_ETH_PATH_GMAC2_RGMII_BIT) +#define MTK_ETH_PATH_GMAC2_SGMII BIT(MTK_ETH_PATH_GMAC2_SGMII_BIT) +#define MTK_ETH_PATH_GMAC2_GEPHY BIT(MTK_ETH_PATH_GMAC2_GEPHY_BIT) +#define MTK_ETH_PATH_GDM1_ESW BIT(MTK_ETH_PATH_GDM1_ESW_BIT) + +#define MTK_GMAC1_RGMII (MTK_ETH_PATH_GMAC1_RGMII | MTK_RGMII) +#define MTK_GMAC1_TRGMII (MTK_ETH_PATH_GMAC1_TRGMII | MTK_TRGMII) +#define MTK_GMAC1_SGMII (MTK_ETH_PATH_GMAC1_SGMII | MTK_SGMII) +#define MTK_GMAC2_RGMII (MTK_ETH_PATH_GMAC2_RGMII | MTK_RGMII) +#define MTK_GMAC2_SGMII (MTK_ETH_PATH_GMAC2_SGMII | MTK_SGMII) +#define MTK_GMAC2_GEPHY (MTK_ETH_PATH_GMAC2_GEPHY | MTK_GEPHY) +#define MTK_GDM1_ESW (MTK_ETH_PATH_GDM1_ESW | MTK_ESW) + +/* MUXes present on SoCs */ +/* 0: GDM1 -> GMAC1, 1: GDM1 -> ESW */ +#define MTK_MUX_GDM1_TO_GMAC1_ESW (MTK_ETH_MUX_GDM1_TO_GMAC1_ESW | MTK_MUX) + +/* 0: GMAC2 -> GEPHY, 1: GMAC0 -> GePHY */ +#define MTK_MUX_GMAC2_GMAC0_TO_GEPHY \ + (MTK_ETH_MUX_GMAC2_GMAC0_TO_GEPHY | MTK_MUX | MTK_INFRA) + +/* 0: U3 -> QPHY, 1: GMAC2 -> QPHY */ +#define MTK_MUX_U3_GMAC2_TO_QPHY \ + (MTK_ETH_MUX_U3_GMAC2_TO_QPHY | MTK_MUX | MTK_INFRA) + +/* 2: GMAC1 -> SGMII, 3: GMAC2 -> SGMII */ +#define MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII \ + (MTK_ETH_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_MUX | \ + MTK_SHARED_SGMII) + +/* 0: GMACx -> GEPHY, 1: GMACx -> SGMII where x is 1 or 2 */ +#define MTK_MUX_GMAC12_TO_GEPHY_SGMII \ + (MTK_ETH_MUX_GMAC12_TO_GEPHY_SGMII | MTK_MUX) + +#define MTK_HAS_CAPS(caps, _x) (((caps) & (_x)) == (_x)) + +#define MT7621_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | \ + MTK_GMAC2_RGMII | MTK_SHARED_INT | \ + MTK_TRGMII_MT7621_CLK | MTK_QDMA) + +#define MT7622_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_SGMII | MTK_GMAC2_RGMII | \ + MTK_GMAC2_SGMII | MTK_GDM1_ESW | \ + MTK_MUX_GDM1_TO_GMAC1_ESW | \ + MTK_MUX_GMAC1_GMAC2_TO_SGMII_RGMII | MTK_QDMA) + +#define MT7623_CAPS (MTK_GMAC1_RGMII | MTK_GMAC1_TRGMII | MTK_GMAC2_RGMII | \ + MTK_QDMA) + +#define MT7628_CAPS (MTK_SHARED_INT | MTK_SOC_MT7628) + +#define MT7629_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | MTK_GMAC2_GEPHY | \ + MTK_GDM1_ESW | MTK_MUX_GDM1_TO_GMAC1_ESW | \ + MTK_MUX_GMAC2_GMAC0_TO_GEPHY | \ + MTK_MUX_U3_GMAC2_TO_QPHY | \ + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA) + +#define MT7986_CAPS (MTK_GMAC1_SGMII | MTK_GMAC2_SGMII | \ + MTK_MUX_GMAC12_TO_GEPHY_SGMII | MTK_QDMA | \ + MTK_NETSYS_V2 | MTK_RSTCTRL_PPE1) + +struct mtk_tx_dma_desc_info { + dma_addr_t addr; + u32 size; + u16 vlan_tci; + u16 qid; + u8 gso:1; + u8 csum:1; + u8 vlan:1; + u8 first:1; + u8 last:1; +}; + +struct mtk_reg_map { + u32 tx_irq_mask; + u32 tx_irq_status; + struct { + u32 rx_ptr; /* rx base pointer */ + u32 rx_cnt_cfg; /* rx max count configuration */ + u32 pcrx_ptr; /* rx cpu pointer */ + u32 glo_cfg; /* global configuration */ + 
u32 rst_idx; /* reset index */ + u32 delay_irq; /* delay interrupt */ + u32 irq_status; /* interrupt status */ + u32 irq_mask; /* interrupt mask */ + u32 int_grp; + } pdma; + struct { + u32 qtx_cfg; /* tx queue configuration */ + u32 rx_ptr; /* rx base pointer */ + u32 rx_cnt_cfg; /* rx max count configuration */ + u32 qcrx_ptr; /* rx cpu pointer */ + u32 glo_cfg; /* global configuration */ + u32 rst_idx; /* reset index */ + u32 delay_irq; /* delay interrupt */ + u32 fc_th; /* flow control */ + u32 int_grp; + u32 hred; /* interrupt mask */ + u32 ctx_ptr; /* tx acquire cpu pointer */ + u32 dtx_ptr; /* tx acquire dma pointer */ + u32 crx_ptr; /* tx release cpu pointer */ + u32 drx_ptr; /* tx release dma pointer */ + u32 fq_head; /* fq head pointer */ + u32 fq_tail; /* fq tail pointer */ + u32 fq_count; /* fq free page count */ + u32 fq_blen; /* fq free page buffer length */ + } qdma; + u32 gdm1_cnt; + u32 gdma_to_ppe; + u32 ppe_base; + u32 wdma_base[2]; +}; + +/* struct mtk_eth_data - This is the structure holding all differences + * among various plaforms + * @reg_map Soc register map. + * @ana_rgc3: The offset for register ANA_RGC3 related to + * sgmiisys syscon + * @caps Flags shown the extra capability for the SoC + * @hw_features Flags shown HW features + * @required_clks Flags shown the bitmap for required clocks on + * the target SoC + * @required_pctl A bool value to show whether the SoC requires + * the extra setup for those pins used by GMAC. + * @hash_offset Flow table hash offset. + * @foe_entry_size Foe table entry size. + * @txd_size Tx DMA descriptor size. + * @rxd_size Rx DMA descriptor size. + * @rx_irq_done_mask Rx irq done register mask. + * @rx_dma_l4_valid Rx DMA valid register mask. + * @dma_max_len Max DMA tx/rx buffer length. + * @dma_len_offset Tx/Rx DMA length field offset. 
+ */ +struct mtk_soc_data { + const struct mtk_reg_map *reg_map; + u32 ana_rgc3; + u32 caps; + u32 required_clks; + bool required_pctl; + u8 offload_version; + u8 hash_offset; + u16 foe_entry_size; + netdev_features_t hw_features; + struct { + u32 txd_size; + u32 rxd_size; + u32 rx_irq_done_mask; + u32 rx_dma_l4_valid; + u32 dma_max_len; + u32 dma_len_offset; + } txrx; +}; + +/* currently no SoC has more than 2 macs */ +#define MTK_MAX_DEVS 2 + +/* struct mtk_pcs - This structure holds each sgmii regmap and associated + * data + * @regmap: The register map pointing at the range used to setup + * SGMII modes + * @ana_rgc3: The offset refers to register ANA_RGC3 related to regmap + * @pcs: Phylink PCS structure + */ +struct mtk_pcs { + struct regmap *regmap; + u32 ana_rgc3; + struct phylink_pcs pcs; +}; + +/* struct mtk_sgmii - This is the structure holding sgmii regmap and its + * characteristics + * @pcs Array of individual PCS structures + */ +struct mtk_sgmii { + struct mtk_pcs pcs[MTK_MAX_DEVS]; +}; + +/* struct mtk_eth - This is the main datasructure for holding the state + * of the driver + * @dev: The device pointer + * @dev: The device pointer used for dma mapping/alloc + * @base: The mapped register i/o base + * @page_lock: Make sure that register operations are atomic + * @tx_irq__lock: Make sure that IRQ register operations are atomic + * @rx_irq__lock: Make sure that IRQ register operations are atomic + * @dim_lock: Make sure that Net DIM operations are atomic + * @dummy_dev: we run 2 netdevs on 1 physical DMA ring and need a + * dummy for NAPI to work + * @netdev: The netdev instances + * @mac: Each netdev is linked to a physical MAC + * @irq: The IRQ that we are using + * @msg_enable: Ethtool msg level + * @ethsys: The register map pointing at the range used to setup + * MII modes + * @infra: The register map pointing at the range used to setup + * SGMII and GePHY path + * @pctl: The register map pointing at the range used to setup + * GMAC port drive/slew values + * @dma_refcnt: track how many netdevs are using the DMA engine + * @tx_ring: Pointer to the memory holding info about the TX ring + * @rx_ring: Pointer to the memory holding info about the RX ring + * @rx_ring_qdma: Pointer to the memory holding info about the QDMA RX ring + * @tx_napi: The TX NAPI struct + * @rx_napi: The RX NAPI struct + * @rx_events: Net DIM RX event counter + * @rx_packets: Net DIM RX packet counter + * @rx_bytes: Net DIM RX byte counter + * @rx_dim: Net DIM RX context + * @tx_events: Net DIM TX event counter + * @tx_packets: Net DIM TX packet counter + * @tx_bytes: Net DIM TX byte counter + * @tx_dim: Net DIM TX context + * @scratch_ring: Newer SoCs need memory for a second HW managed TX ring + * @phy_scratch_ring: physical address of scratch_ring + * @scratch_head: The scratch memory that scratch_ring points to. 
+ * @clks: clock array for all clocks required + * @mii_bus: If there is a bus we need to create an instance for it + * @pending_work: The workqueue used to reset the dma ring + * @state: Initialization and runtime state of the device + * @soc: Holding specific data among vaious SoCs + */ + +struct mtk_eth { + struct device *dev; + struct device *dma_dev; + void __iomem *base; + spinlock_t page_lock; + spinlock_t tx_irq_lock; + spinlock_t rx_irq_lock; + struct net_device dummy_dev; + struct net_device *netdev[MTK_MAX_DEVS]; + struct mtk_mac *mac[MTK_MAX_DEVS]; + int irq[3]; + u32 msg_enable; + unsigned long sysclk; + struct regmap *ethsys; + struct regmap *infra; + struct mtk_sgmii *sgmii; + struct regmap *pctl; + bool hwlro; + refcount_t dma_refcnt; + struct mtk_tx_ring tx_ring; + struct mtk_rx_ring rx_ring[MTK_MAX_RX_RING_NUM]; + struct mtk_rx_ring rx_ring_qdma; + struct napi_struct tx_napi; + struct napi_struct rx_napi; + void *scratch_ring; + dma_addr_t phy_scratch_ring; + void *scratch_head; + struct clk *clks[MTK_CLK_MAX]; + + struct mii_bus *mii_bus; + struct work_struct pending_work; + unsigned long state; + + const struct mtk_soc_data *soc; + + spinlock_t dim_lock; + + u32 rx_events; + u32 rx_packets; + u32 rx_bytes; + struct dim rx_dim; + + u32 tx_events; + u32 tx_packets; + u32 tx_bytes; + struct dim tx_dim; + + int ip_align; + + struct mtk_ppe *ppe[2]; + struct rhashtable flow_table; + + struct bpf_prog __rcu *prog; +}; + +/* struct mtk_mac - the structure that holds the info about the MACs of the + * SoC + * @id: The number of the MAC + * @interface: Interface mode kept for detecting change in hw settings + * @of_node: Our devicetree node + * @hw: Backpointer to our main datastruture + * @hw_stats: Packet statistics counter + */ +struct mtk_mac { + int id; + phy_interface_t interface; + int speed; + struct device_node *of_node; + struct phylink *phylink; + struct phylink_config phylink_config; + struct mtk_eth *hw; + struct mtk_hw_stats *hw_stats; + __be32 hwlro_ip[MTK_MAX_LRO_IP_CNT]; + int hwlro_ip_cnt; + unsigned int syscfg0; +}; + +/* the struct describing the SoC. 
these are declared in the soc_xyz.c files */ +extern const struct of_device_id of_mtk_match[]; + +static inline struct mtk_foe_entry * +mtk_foe_get_entry(struct mtk_ppe *ppe, u16 hash) +{ + const struct mtk_soc_data *soc = ppe->eth->soc; + + return ppe->foe_table + hash * soc->foe_entry_size; +} + +static inline u32 mtk_get_ib1_ts_mask(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return MTK_FOE_IB1_BIND_TIMESTAMP_V2; + + return MTK_FOE_IB1_BIND_TIMESTAMP; +} + +static inline u32 mtk_get_ib1_ppoe_mask(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return MTK_FOE_IB1_BIND_PPPOE_V2; + + return MTK_FOE_IB1_BIND_PPPOE; +} + +static inline u32 mtk_get_ib1_vlan_tag_mask(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return MTK_FOE_IB1_BIND_VLAN_TAG_V2; + + return MTK_FOE_IB1_BIND_VLAN_TAG; +} + +static inline u32 mtk_get_ib1_vlan_layer_mask(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return MTK_FOE_IB1_BIND_VLAN_LAYER_V2; + + return MTK_FOE_IB1_BIND_VLAN_LAYER; +} + +static inline u32 mtk_prep_ib1_vlan_layer(struct mtk_eth *eth, u32 val) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); + + return FIELD_PREP(MTK_FOE_IB1_BIND_VLAN_LAYER, val); +} + +static inline u32 mtk_get_ib1_vlan_layer(struct mtk_eth *eth, u32 val) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER_V2, val); + + return FIELD_GET(MTK_FOE_IB1_BIND_VLAN_LAYER, val); +} + +static inline u32 mtk_get_ib1_pkt_type_mask(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return MTK_FOE_IB1_PACKET_TYPE_V2; + + return MTK_FOE_IB1_PACKET_TYPE; +} + +static inline u32 mtk_get_ib1_pkt_type(struct mtk_eth *eth, u32 val) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE_V2, val); + + return FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, val); +} + +static inline u32 mtk_get_ib2_multicast_mask(struct mtk_eth *eth) +{ + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + return MTK_FOE_IB2_MULTICAST_V2; + + return MTK_FOE_IB2_MULTICAST; +} + +/* read the hardware status register */ +void mtk_stats_update_mac(struct mtk_mac *mac); + +void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg); +u32 mtk_r32(struct mtk_eth *eth, unsigned reg); + +struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id); +int mtk_sgmii_init(struct mtk_sgmii *ss, struct device_node *np, + u32 ana_rgc3); + +int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); +int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); + +int mtk_eth_offload_init(struct mtk_eth *eth); +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data); +void mtk_eth_set_dma_device(struct mtk_eth *eth, struct device *dma_dev); + + +#endif /* MTK_ETH_H */ diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c new file mode 100644 index 000000000..d6eed2045 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -0,0 +1,911 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/etherdevice.h> +#include <linux/platform_device.h> +#include <linux/if_ether.h> +#include <linux/if_vlan.h> +#include 
<net/dsa.h> +#include "mtk_eth_soc.h" +#include "mtk_ppe.h" +#include "mtk_ppe_regs.h" + +static DEFINE_SPINLOCK(ppe_lock); + +static const struct rhashtable_params mtk_flow_l2_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, l2_node), + .key_offset = offsetof(struct mtk_flow_entry, data.bridge), + .key_len = offsetof(struct mtk_foe_bridge, key_end), + .automatic_shrinking = true, +}; + +static void ppe_w32(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + writel(val, ppe->base + reg); +} + +static u32 ppe_r32(struct mtk_ppe *ppe, u32 reg) +{ + return readl(ppe->base + reg); +} + +static u32 ppe_m32(struct mtk_ppe *ppe, u32 reg, u32 mask, u32 set) +{ + u32 val; + + val = ppe_r32(ppe, reg); + val &= ~mask; + val |= set; + ppe_w32(ppe, reg, val); + + return val; +} + +static u32 ppe_set(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, 0, val); +} + +static u32 ppe_clear(struct mtk_ppe *ppe, u32 reg, u32 val) +{ + return ppe_m32(ppe, reg, val, 0); +} + +static u32 mtk_eth_timestamp(struct mtk_eth *eth) +{ + return mtk_r32(eth, 0x0010) & mtk_get_ib1_ts_mask(eth); +} + +static int mtk_ppe_wait_busy(struct mtk_ppe *ppe) +{ + int ret; + u32 val; + + ret = readl_poll_timeout(ppe->base + MTK_PPE_GLO_CFG, val, + !(val & MTK_PPE_GLO_CFG_BUSY), + 20, MTK_PPE_WAIT_TIMEOUT_US); + + if (ret) + dev_err(ppe->dev, "PPE table busy"); + + return ret; +} + +static void mtk_ppe_cache_clear(struct mtk_ppe *ppe) +{ + ppe_set(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); + ppe_clear(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_CLEAR); +} + +static void mtk_ppe_cache_enable(struct mtk_ppe *ppe, bool enable) +{ + mtk_ppe_cache_clear(ppe); + + ppe_m32(ppe, MTK_PPE_CACHE_CTL, MTK_PPE_CACHE_CTL_EN, + enable * MTK_PPE_CACHE_CTL_EN); +} + +static u32 mtk_ppe_hash_entry(struct mtk_eth *eth, struct mtk_foe_entry *e) +{ + u32 hv1, hv2, hv3; + u32 hash; + + switch (mtk_get_ib1_pkt_type(eth, e->ib1)) { + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + hv1 = e->ipv4.orig.ports; + hv2 = e->ipv4.orig.dest_ip; + hv3 = e->ipv4.orig.src_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + hv1 = e->ipv6.src_ip[3] ^ e->ipv6.dest_ip[3]; + hv1 ^= e->ipv6.ports; + + hv2 = e->ipv6.src_ip[2] ^ e->ipv6.dest_ip[2]; + hv2 ^= e->ipv6.dest_ip[0]; + + hv3 = e->ipv6.src_ip[1] ^ e->ipv6.dest_ip[1]; + hv3 ^= e->ipv6.src_ip[0]; + break; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + default: + WARN_ON_ONCE(1); + return MTK_PPE_HASH_MASK; + } + + hash = (hv1 & hv2) | ((~hv1) & hv3); + hash = (hash >> 24) | ((hash & 0xffffff) << 8); + hash ^= hv1 ^ hv2 ^ hv3; + hash ^= hash >> 16; + hash <<= (ffs(eth->soc->hash_offset) - 1); + hash &= MTK_PPE_ENTRIES - 1; + + return hash; +} + +static inline struct mtk_foe_mac_info * +mtk_foe_entry_l2(struct mtk_eth *eth, struct mtk_foe_entry *entry) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.l2; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.l2; + + return &entry->ipv4.l2; +} + +static inline u32 * +mtk_foe_entry_ib2(struct mtk_eth *eth, struct mtk_foe_entry *entry) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) + return &entry->bridge.ib2; + + if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) + return &entry->ipv6.ib2; + + return &entry->ipv4.ib2; +} + +int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int type, int l4proto, u8 
pse_port, u8 *src_mac, + u8 *dest_mac) +{ + struct mtk_foe_mac_info *l2; + u32 ports_pad, val; + + memset(entry, 0, sizeof(*entry)); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE_V2, type) | + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | + MTK_FOE_IB1_BIND_CACHE_V2 | MTK_FOE_IB1_BIND_TTL_V2; + entry->ib1 = val; + + val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, pse_port) | + FIELD_PREP(MTK_FOE_IB2_PORT_AG_V2, 0xf); + } else { + val = FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_BIND) | + FIELD_PREP(MTK_FOE_IB1_PACKET_TYPE, type) | + FIELD_PREP(MTK_FOE_IB1_UDP, l4proto == IPPROTO_UDP) | + MTK_FOE_IB1_BIND_CACHE | MTK_FOE_IB1_BIND_TTL; + entry->ib1 = val; + + val = FIELD_PREP(MTK_FOE_IB2_DEST_PORT, pse_port) | + FIELD_PREP(MTK_FOE_IB2_PORT_MG, 0x3f) | + FIELD_PREP(MTK_FOE_IB2_PORT_AG, 0x1f); + } + + if (is_multicast_ether_addr(dest_mac)) + val |= mtk_get_ib2_multicast_mask(eth); + + ports_pad = 0xa5a5a500 | (l4proto & 0xff); + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + entry->ipv4.orig.ports = ports_pad; + if (type == MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + entry->ipv6.ports = ports_pad; + + if (type == MTK_PPE_PKT_TYPE_BRIDGE) { + ether_addr_copy(entry->bridge.src_mac, src_mac); + ether_addr_copy(entry->bridge.dest_mac, dest_mac); + entry->bridge.ib2 = val; + l2 = &entry->bridge.l2; + } else if (type >= MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + entry->ipv6.ib2 = val; + l2 = &entry->ipv6.l2; + } else { + entry->ipv4.ib2 = val; + l2 = &entry->ipv4.l2; + } + + l2->dest_mac_hi = get_unaligned_be32(dest_mac); + l2->dest_mac_lo = get_unaligned_be16(dest_mac + 4); + l2->src_mac_hi = get_unaligned_be32(src_mac); + l2->src_mac_lo = get_unaligned_be16(src_mac + 4); + + if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T) + l2->etype = ETH_P_IPV6; + else + l2->etype = ETH_P_IP; + + return 0; +} + +int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, + struct mtk_foe_entry *entry, u8 port) +{ + u32 *ib2 = mtk_foe_entry_ib2(eth, entry); + u32 val = *ib2; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + val &= ~MTK_FOE_IB2_DEST_PORT_V2; + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT_V2, port); + } else { + val &= ~MTK_FOE_IB2_DEST_PORT; + val |= FIELD_PREP(MTK_FOE_IB2_DEST_PORT, port); + } + *ib2 = val; + + return 0; +} + +int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth, + struct mtk_foe_entry *entry, bool egress, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + struct mtk_ipv4_tuple *t; + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + if (egress) { + t = &entry->ipv4.new; + break; + } + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + t = &entry->ipv4.orig; + break; + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6_6rd.tunnel_src_ip = be32_to_cpu(src_addr); + entry->ipv6_6rd.tunnel_dest_ip = be32_to_cpu(dest_addr); + return 0; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + t->src_ip = be32_to_cpu(src_addr); + t->dest_ip = be32_to_cpu(dest_addr); + + if (type == MTK_PPE_PKT_TYPE_IPV4_ROUTE) + return 0; + + t->src_port = be16_to_cpu(src_port); + t->dest_port = be16_to_cpu(dest_port); + + return 0; +} + +int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, + struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port) +{ + int type = mtk_get_ib1_pkt_type(eth, entry->ib1); + u32 *src, *dest; + int i; + + switch (type) { + case 
MTK_PPE_PKT_TYPE_IPV4_DSLITE: + src = entry->dslite.tunnel_src_ip; + dest = entry->dslite.tunnel_dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + entry->ipv6.src_port = be16_to_cpu(src_port); + entry->ipv6.dest_port = be16_to_cpu(dest_port); + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + src = entry->ipv6.src_ip; + dest = entry->ipv6.dest_ip; + break; + default: + WARN_ON_ONCE(1); + return -EINVAL; + } + + for (i = 0; i < 4; i++) + src[i] = be32_to_cpu(src_addr[i]); + for (i = 0; i < 4; i++) + dest[i] = be32_to_cpu(dest_addr[i]); + + return 0; +} + +int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int port) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + + l2->etype = BIT(port); + + if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth))) + entry->ib1 |= mtk_prep_ib1_vlan_layer(eth, 1); + else + l2->etype |= BIT(8); + + entry->ib1 &= ~mtk_get_ib1_vlan_tag_mask(eth); + + return 0; +} + +int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int vid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + + switch (mtk_get_ib1_vlan_layer(eth, entry->ib1)) { + case 0: + entry->ib1 |= mtk_get_ib1_vlan_tag_mask(eth) | + mtk_prep_ib1_vlan_layer(eth, 1); + l2->vlan1 = vid; + return 0; + case 1: + if (!(entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) { + l2->vlan1 = vid; + l2->etype |= BIT(8); + } else { + l2->vlan2 = vid; + entry->ib1 += mtk_prep_ib1_vlan_layer(eth, 1); + } + return 0; + default: + return -ENOSPC; + } +} + +int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int sid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + + if (!(entry->ib1 & mtk_get_ib1_vlan_layer_mask(eth)) || + (entry->ib1 & mtk_get_ib1_vlan_tag_mask(eth))) + l2->etype = ETH_P_PPP_SES; + + entry->ib1 |= mtk_get_ib1_ppoe_mask(eth); + l2->pppoe_id = sid; + + return 0; +} + +int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int wdma_idx, int txq, int bss, int wcid) +{ + struct mtk_foe_mac_info *l2 = mtk_foe_entry_l2(eth, entry); + u32 *ib2 = mtk_foe_entry_ib2(eth, entry); + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + *ib2 &= ~MTK_FOE_IB2_PORT_MG_V2; + *ib2 |= FIELD_PREP(MTK_FOE_IB2_RX_IDX, txq) | + MTK_FOE_IB2_WDMA_WINFO_V2; + l2->winfo = FIELD_PREP(MTK_FOE_WINFO_WCID, wcid) | + FIELD_PREP(MTK_FOE_WINFO_BSS, bss); + } else { + *ib2 &= ~MTK_FOE_IB2_PORT_MG; + *ib2 |= MTK_FOE_IB2_WDMA_WINFO; + if (wdma_idx) + *ib2 |= MTK_FOE_IB2_WDMA_DEVIDX; + l2->vlan2 = FIELD_PREP(MTK_FOE_VLAN2_WINFO_BSS, bss) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_WCID, wcid) | + FIELD_PREP(MTK_FOE_VLAN2_WINFO_RING, txq); + } + + return 0; +} + +static bool +mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, + struct mtk_foe_entry *data) +{ + int type, len; + + if ((data->ib1 ^ entry->data.ib1) & MTK_FOE_IB1_UDP) + return false; + + type = mtk_get_ib1_pkt_type(eth, entry->data.ib1); + if (type > MTK_PPE_PKT_TYPE_IPV4_DSLITE) + len = offsetof(struct mtk_foe_entry, ipv6._rsv); + else + len = offsetof(struct mtk_foe_entry, ipv4.ib2); + + return !memcmp(&entry->data.data, &data->data, len - 4); +} + +static void +__mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + struct hlist_head *head; + struct hlist_node *tmp; + + if (entry->type == MTK_FLOW_TYPE_L2) { + rhashtable_remove_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); + + head = &entry->l2_flows; + hlist_for_each_entry_safe(entry, 
tmp, head, l2_data.list) + __mtk_foe_entry_clear(ppe, entry); + return; + } + + hlist_del_init(&entry->list); + if (entry->hash != 0xffff) { + struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, entry->hash); + + hwe->ib1 &= ~MTK_FOE_IB1_STATE; + hwe->ib1 |= FIELD_PREP(MTK_FOE_IB1_STATE, MTK_FOE_STATE_INVALID); + dma_wmb(); + mtk_ppe_cache_clear(ppe); + } + entry->hash = 0xffff; + + if (entry->type != MTK_FLOW_TYPE_L2_SUBFLOW) + return; + + hlist_del_init(&entry->l2_data.list); + kfree(entry); +} + +static int __mtk_foe_entry_idle_time(struct mtk_ppe *ppe, u32 ib1) +{ + u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); + u16 now = mtk_eth_timestamp(ppe->eth); + u16 timestamp = ib1 & ib1_ts_mask; + + if (timestamp > now) + return ib1_ts_mask + 1 - timestamp + now; + else + return now - timestamp; +} + +static void +mtk_flow_entry_update_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + u32 ib1_ts_mask = mtk_get_ib1_ts_mask(ppe->eth); + struct mtk_flow_entry *cur; + struct mtk_foe_entry *hwe; + struct hlist_node *tmp; + int idle; + + idle = __mtk_foe_entry_idle_time(ppe, entry->data.ib1); + hlist_for_each_entry_safe(cur, tmp, &entry->l2_flows, l2_data.list) { + int cur_idle; + u32 ib1; + + hwe = mtk_foe_get_entry(ppe, cur->hash); + ib1 = READ_ONCE(hwe->ib1); + + if (FIELD_GET(MTK_FOE_IB1_STATE, ib1) != MTK_FOE_STATE_BIND) { + cur->hash = 0xffff; + __mtk_foe_entry_clear(ppe, cur); + continue; + } + + cur_idle = __mtk_foe_entry_idle_time(ppe, ib1); + if (cur_idle >= idle) + continue; + + idle = cur_idle; + entry->data.ib1 &= ~ib1_ts_mask; + entry->data.ib1 |= hwe->ib1 & ib1_ts_mask; + } +} + +static void +mtk_flow_entry_update(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + struct mtk_foe_entry foe = {}; + struct mtk_foe_entry *hwe; + + spin_lock_bh(&ppe_lock); + + if (entry->type == MTK_FLOW_TYPE_L2) { + mtk_flow_entry_update_l2(ppe, entry); + goto out; + } + + if (entry->hash == 0xffff) + goto out; + + hwe = mtk_foe_get_entry(ppe, entry->hash); + memcpy(&foe, hwe, ppe->eth->soc->foe_entry_size); + if (!mtk_flow_entry_match(ppe->eth, entry, &foe)) { + entry->hash = 0xffff; + goto out; + } + + entry->data.ib1 = foe.ib1; + +out: + spin_unlock_bh(&ppe_lock); +} + +static void +__mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, + u16 hash) +{ + struct mtk_eth *eth = ppe->eth; + u16 timestamp = mtk_eth_timestamp(eth); + struct mtk_foe_entry *hwe; + + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP_V2, + timestamp); + } else { + entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP; + entry->ib1 |= FIELD_PREP(MTK_FOE_IB1_BIND_TIMESTAMP, + timestamp); + } + + hwe = mtk_foe_get_entry(ppe, hash); + memcpy(&hwe->data, &entry->data, eth->soc->foe_entry_size - sizeof(hwe->ib1)); + wmb(); + hwe->ib1 = entry->ib1; + + dma_wmb(); + + mtk_ppe_cache_clear(ppe); +} + +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + spin_lock_bh(&ppe_lock); + __mtk_foe_entry_clear(ppe, entry); + spin_unlock_bh(&ppe_lock); +} + +static int +mtk_foe_entry_commit_l2(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + entry->type = MTK_FLOW_TYPE_L2; + + return rhashtable_insert_fast(&ppe->l2_flows, &entry->l2_node, + mtk_flow_l2_ht_params); +} + +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + const struct mtk_soc_data *soc = ppe->eth->soc; + int type = mtk_get_ib1_pkt_type(ppe->eth, entry->data.ib1); + u32 hash; + + if (type == 
MTK_PPE_PKT_TYPE_BRIDGE) + return mtk_foe_entry_commit_l2(ppe, entry); + + hash = mtk_ppe_hash_entry(ppe->eth, &entry->data); + entry->hash = 0xffff; + spin_lock_bh(&ppe_lock); + hlist_add_head(&entry->list, &ppe->foe_flow[hash / soc->hash_offset]); + spin_unlock_bh(&ppe_lock); + + return 0; +} + +static void +mtk_foe_entry_commit_subflow(struct mtk_ppe *ppe, struct mtk_flow_entry *entry, + u16 hash) +{ + const struct mtk_soc_data *soc = ppe->eth->soc; + struct mtk_flow_entry *flow_info; + struct mtk_foe_entry foe = {}, *hwe; + struct mtk_foe_mac_info *l2; + u32 ib1_mask = mtk_get_ib1_pkt_type_mask(ppe->eth) | MTK_FOE_IB1_UDP; + int type; + + flow_info = kzalloc(sizeof(*flow_info), GFP_ATOMIC); + if (!flow_info) + return; + + flow_info->l2_data.base_flow = entry; + flow_info->type = MTK_FLOW_TYPE_L2_SUBFLOW; + flow_info->hash = hash; + hlist_add_head(&flow_info->list, + &ppe->foe_flow[hash / soc->hash_offset]); + hlist_add_head(&flow_info->l2_data.list, &entry->l2_flows); + + hwe = mtk_foe_get_entry(ppe, hash); + memcpy(&foe, hwe, soc->foe_entry_size); + foe.ib1 &= ib1_mask; + foe.ib1 |= entry->data.ib1 & ~ib1_mask; + + l2 = mtk_foe_entry_l2(ppe->eth, &foe); + memcpy(l2, &entry->data.bridge.l2, sizeof(*l2)); + + type = mtk_get_ib1_pkt_type(ppe->eth, foe.ib1); + if (type == MTK_PPE_PKT_TYPE_IPV4_HNAPT) + memcpy(&foe.ipv4.new, &foe.ipv4.orig, sizeof(foe.ipv4.new)); + else if (type >= MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T && l2->etype == ETH_P_IP) + l2->etype = ETH_P_IPV6; + + *mtk_foe_entry_ib2(ppe->eth, &foe) = entry->data.bridge.ib2; + + __mtk_foe_entry_commit(ppe, &foe, hash); +} + +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) +{ + const struct mtk_soc_data *soc = ppe->eth->soc; + struct hlist_head *head = &ppe->foe_flow[hash / soc->hash_offset]; + struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, hash); + struct mtk_flow_entry *entry; + struct mtk_foe_bridge key = {}; + struct hlist_node *n; + struct ethhdr *eh; + bool found = false; + u8 *tag; + + spin_lock_bh(&ppe_lock); + + if (FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == MTK_FOE_STATE_BIND) + goto out; + + hlist_for_each_entry_safe(entry, n, head, list) { + if (entry->type == MTK_FLOW_TYPE_L2_SUBFLOW) { + if (unlikely(FIELD_GET(MTK_FOE_IB1_STATE, hwe->ib1) == + MTK_FOE_STATE_BIND)) + continue; + + entry->hash = 0xffff; + __mtk_foe_entry_clear(ppe, entry); + continue; + } + + if (found || !mtk_flow_entry_match(ppe->eth, entry, hwe)) { + if (entry->hash != 0xffff) + entry->hash = 0xffff; + continue; + } + + entry->hash = hash; + __mtk_foe_entry_commit(ppe, &entry->data, hash); + found = true; + } + + if (found) + goto out; + + eh = eth_hdr(skb); + ether_addr_copy(key.dest_mac, eh->h_dest); + ether_addr_copy(key.src_mac, eh->h_source); + tag = skb->data - 2; + key.vlan = 0; + switch (skb->protocol) { +#if IS_ENABLED(CONFIG_NET_DSA) + case htons(ETH_P_XDSA): + if (!netdev_uses_dsa(skb->dev) || + skb->dev->dsa_ptr->tag_ops->proto != DSA_TAG_PROTO_MTK) + goto out; + + tag += 4; + if (get_unaligned_be16(tag) != ETH_P_8021Q) + break; + + fallthrough; +#endif + case htons(ETH_P_8021Q): + key.vlan = get_unaligned_be16(tag + 2) & VLAN_VID_MASK; + break; + default: + break; + } + + entry = rhashtable_lookup_fast(&ppe->l2_flows, &key, mtk_flow_l2_ht_params); + if (!entry) + goto out; + + mtk_foe_entry_commit_subflow(ppe, entry, hash); + +out: + spin_unlock_bh(&ppe_lock); +} + +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry) +{ + mtk_flow_entry_update(ppe, entry); + + return 
__mtk_foe_entry_idle_time(ppe, entry->data.ib1); +} + +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, + int version, int index) +{ + const struct mtk_soc_data *soc = eth->soc; + struct device *dev = eth->dev; + struct mtk_ppe *ppe; + u32 foe_flow_size; + void *foe; + + ppe = devm_kzalloc(dev, sizeof(*ppe), GFP_KERNEL); + if (!ppe) + return NULL; + + rhashtable_init(&ppe->l2_flows, &mtk_flow_l2_ht_params); + + /* need to allocate a separate device, since it PPE DMA access is + * not coherent. + */ + ppe->base = base; + ppe->eth = eth; + ppe->dev = dev; + ppe->version = version; + + foe = dmam_alloc_coherent(ppe->dev, + MTK_PPE_ENTRIES * soc->foe_entry_size, + &ppe->foe_phys, GFP_KERNEL); + if (!foe) + goto err_free_l2_flows; + + ppe->foe_table = foe; + + foe_flow_size = (MTK_PPE_ENTRIES / soc->hash_offset) * + sizeof(*ppe->foe_flow); + ppe->foe_flow = devm_kzalloc(dev, foe_flow_size, GFP_KERNEL); + if (!ppe->foe_flow) + goto err_free_l2_flows; + + mtk_ppe_debugfs_init(ppe, index); + + return ppe; + +err_free_l2_flows: + rhashtable_destroy(&ppe->l2_flows); + return NULL; +} + +void mtk_ppe_deinit(struct mtk_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) { + if (!eth->ppe[i]) + return; + rhashtable_destroy(ð->ppe[i]->l2_flows); + } +} + +static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe) +{ + static const u8 skip[] = { 12, 25, 38, 51, 76, 89, 102 }; + int i, k; + + memset(ppe->foe_table, 0, + MTK_PPE_ENTRIES * ppe->eth->soc->foe_entry_size); + + if (!IS_ENABLED(CONFIG_SOC_MT7621)) + return; + + /* skip all entries that cross the 1024 byte boundary */ + for (i = 0; i < MTK_PPE_ENTRIES; i += 128) { + for (k = 0; k < ARRAY_SIZE(skip); k++) { + struct mtk_foe_entry *hwe; + + hwe = mtk_foe_get_entry(ppe, i + skip[k]); + hwe->ib1 |= MTK_FOE_IB1_STATIC; + } + } +} + +void mtk_ppe_start(struct mtk_ppe *ppe) +{ + u32 val; + + if (!ppe) + return; + + mtk_ppe_init_foe_table(ppe); + ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys); + + val = MTK_PPE_TB_CFG_ENTRY_80B | + MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN | + FIELD_PREP(MTK_PPE_TB_CFG_SEARCH_MISS, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD) | + FIELD_PREP(MTK_PPE_TB_CFG_KEEPALIVE, + MTK_PPE_KEEPALIVE_DISABLE) | + FIELD_PREP(MTK_PPE_TB_CFG_HASH_MODE, 1) | + FIELD_PREP(MTK_PPE_TB_CFG_SCAN_MODE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE) | + FIELD_PREP(MTK_PPE_TB_CFG_ENTRY_NUM, + MTK_PPE_ENTRIES_SHIFT); + if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) + val |= MTK_PPE_TB_CFG_INFO_SEL; + ppe_w32(ppe, MTK_PPE_TB_CFG, val); + + ppe_w32(ppe, MTK_PPE_IP_PROTO_CHK, + MTK_PPE_IP_PROTO_CHK_IPV4 | MTK_PPE_IP_PROTO_CHK_IPV6); + + mtk_ppe_cache_enable(ppe, true); + + val = MTK_PPE_FLOW_CFG_IP6_3T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_5T_ROUTE | + MTK_PPE_FLOW_CFG_IP6_6RD | + MTK_PPE_FLOW_CFG_IP4_NAT | + MTK_PPE_FLOW_CFG_IP4_NAPT | + MTK_PPE_FLOW_CFG_IP4_DSLITE | + MTK_PPE_FLOW_CFG_IP4_NAT_FRAG; + if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) + val |= MTK_PPE_MD_TOAP_BYP_CRSN0 | + MTK_PPE_MD_TOAP_BYP_CRSN1 | + MTK_PPE_MD_TOAP_BYP_CRSN2 | + MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY; + else + val |= MTK_PPE_FLOW_CFG_IP4_TCP_FRAG | + MTK_PPE_FLOW_CFG_IP4_UDP_FRAG; + ppe_w32(ppe, MTK_PPE_FLOW_CFG, val); + + val = FIELD_PREP(MTK_PPE_UNBIND_AGE_MIN_PACKETS, 1000) | + FIELD_PREP(MTK_PPE_UNBIND_AGE_DELTA, 3); + ppe_w32(ppe, MTK_PPE_UNBIND_AGE, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_UDP, 12) | + 
FIELD_PREP(MTK_PPE_BIND_AGE0_DELTA_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_AGE0, val); + + val = FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP_FIN, 1) | + FIELD_PREP(MTK_PPE_BIND_AGE1_DELTA_TCP, 7); + ppe_w32(ppe, MTK_PPE_BIND_AGE1, val); + + val = MTK_PPE_BIND_LIMIT0_QUARTER | MTK_PPE_BIND_LIMIT0_HALF; + ppe_w32(ppe, MTK_PPE_BIND_LIMIT0, val); + + val = MTK_PPE_BIND_LIMIT1_FULL | + FIELD_PREP(MTK_PPE_BIND_LIMIT1_NON_L4, 1); + ppe_w32(ppe, MTK_PPE_BIND_LIMIT1, val); + + val = FIELD_PREP(MTK_PPE_BIND_RATE_BIND, 30) | + FIELD_PREP(MTK_PPE_BIND_RATE_PREBIND, 1); + ppe_w32(ppe, MTK_PPE_BIND_RATE, val); + + /* enable PPE */ + val = MTK_PPE_GLO_CFG_EN | + MTK_PPE_GLO_CFG_IP4_L4_CS_DROP | + MTK_PPE_GLO_CFG_IP4_CS_DROP | + MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE; + ppe_w32(ppe, MTK_PPE_GLO_CFG, val); + + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0); + + if (MTK_HAS_CAPS(ppe->eth->soc->caps, MTK_NETSYS_V2)) { + ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT1, 0xcb777); + ppe_w32(ppe, MTK_PPE_SBW_CTRL, 0x7f); + } +} + +int mtk_ppe_stop(struct mtk_ppe *ppe) +{ + u32 val; + int i; + + if (!ppe) + return 0; + + for (i = 0; i < MTK_PPE_ENTRIES; i++) { + struct mtk_foe_entry *hwe = mtk_foe_get_entry(ppe, i); + + hwe->ib1 = FIELD_PREP(MTK_FOE_IB1_STATE, + MTK_FOE_STATE_INVALID); + } + + mtk_ppe_cache_enable(ppe, false); + + /* disable offload engine */ + ppe_clear(ppe, MTK_PPE_GLO_CFG, MTK_PPE_GLO_CFG_EN); + ppe_w32(ppe, MTK_PPE_FLOW_CFG, 0); + + /* disable aging */ + val = MTK_PPE_TB_CFG_AGE_NON_L4 | + MTK_PPE_TB_CFG_AGE_UNBIND | + MTK_PPE_TB_CFG_AGE_TCP | + MTK_PPE_TB_CFG_AGE_UDP | + MTK_PPE_TB_CFG_AGE_TCP_FIN; + ppe_clear(ppe, MTK_PPE_TB_CFG, val); + + return mtk_ppe_wait_busy(ppe); +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h new file mode 100644 index 000000000..e66283b1b --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -0,0 +1,358 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_PPE_H +#define __MTK_PPE_H + +#include <linux/kernel.h> +#include <linux/bitfield.h> +#include <linux/rhashtable.h> + +#define MTK_PPE_ENTRIES_SHIFT 3 +#define MTK_PPE_ENTRIES (1024 << MTK_PPE_ENTRIES_SHIFT) +#define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) +#define MTK_PPE_WAIT_TIMEOUT_US 1000000 + +#define MTK_FOE_IB1_UNBIND_TIMESTAMP GENMASK(7, 0) +#define MTK_FOE_IB1_UNBIND_PACKETS GENMASK(23, 8) +#define MTK_FOE_IB1_UNBIND_PREBIND BIT(24) + +#define MTK_FOE_IB1_BIND_TIMESTAMP GENMASK(14, 0) +#define MTK_FOE_IB1_BIND_KEEPALIVE BIT(15) +#define MTK_FOE_IB1_BIND_VLAN_LAYER GENMASK(18, 16) +#define MTK_FOE_IB1_BIND_PPPOE BIT(19) +#define MTK_FOE_IB1_BIND_VLAN_TAG BIT(20) +#define MTK_FOE_IB1_BIND_PKT_SAMPLE BIT(21) +#define MTK_FOE_IB1_BIND_CACHE BIT(22) +#define MTK_FOE_IB1_BIND_TUNNEL_DECAP BIT(23) +#define MTK_FOE_IB1_BIND_TTL BIT(24) + +#define MTK_FOE_IB1_PACKET_TYPE GENMASK(27, 25) +#define MTK_FOE_IB1_STATE GENMASK(29, 28) +#define MTK_FOE_IB1_UDP BIT(30) +#define MTK_FOE_IB1_STATIC BIT(31) + +/* CONFIG_MEDIATEK_NETSYS_V2 */ +#define MTK_FOE_IB1_BIND_TIMESTAMP_V2 GENMASK(7, 0) +#define MTK_FOE_IB1_BIND_VLAN_LAYER_V2 GENMASK(16, 14) +#define MTK_FOE_IB1_BIND_PPPOE_V2 BIT(17) +#define MTK_FOE_IB1_BIND_VLAN_TAG_V2 BIT(18) +#define MTK_FOE_IB1_BIND_CACHE_V2 BIT(20) +#define MTK_FOE_IB1_BIND_TTL_V2 BIT(22) +#define MTK_FOE_IB1_PACKET_TYPE_V2 GENMASK(27, 23) + +enum { + MTK_PPE_PKT_TYPE_IPV4_HNAPT = 0, + MTK_PPE_PKT_TYPE_IPV4_ROUTE = 1, + MTK_PPE_PKT_TYPE_BRIDGE = 2, + MTK_PPE_PKT_TYPE_IPV4_DSLITE = 3, + 
MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T = 4, + MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T = 5, + MTK_PPE_PKT_TYPE_IPV6_6RD = 7, +}; + +#define MTK_FOE_IB2_QID GENMASK(3, 0) +#define MTK_FOE_IB2_PSE_QOS BIT(4) +#define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) +#define MTK_FOE_IB2_MULTICAST BIT(8) + +#define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) +#define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) +#define MTK_FOE_IB2_WDMA_WINFO BIT(17) + +#define MTK_FOE_IB2_PORT_MG GENMASK(17, 12) + +#define MTK_FOE_IB2_RX_IDX GENMASK(18, 17) +#define MTK_FOE_IB2_PORT_AG GENMASK(23, 18) + +#define MTK_FOE_IB2_DSCP GENMASK(31, 24) + +/* CONFIG_MEDIATEK_NETSYS_V2 */ +#define MTK_FOE_IB2_PORT_MG_V2 BIT(7) +#define MTK_FOE_IB2_DEST_PORT_V2 GENMASK(12, 9) +#define MTK_FOE_IB2_MULTICAST_V2 BIT(13) +#define MTK_FOE_IB2_WDMA_WINFO_V2 BIT(19) +#define MTK_FOE_IB2_PORT_AG_V2 GENMASK(23, 20) + +#define MTK_FOE_VLAN2_WINFO_BSS GENMASK(5, 0) +#define MTK_FOE_VLAN2_WINFO_WCID GENMASK(13, 6) +#define MTK_FOE_VLAN2_WINFO_RING GENMASK(15, 14) + +#define MTK_FOE_WINFO_BSS GENMASK(5, 0) +#define MTK_FOE_WINFO_WCID GENMASK(15, 6) + +enum { + MTK_FOE_STATE_INVALID, + MTK_FOE_STATE_UNBIND, + MTK_FOE_STATE_BIND, + MTK_FOE_STATE_FIN +}; + +struct mtk_foe_mac_info { + u16 vlan1; + u16 etype; + + u32 dest_mac_hi; + + u16 vlan2; + u16 dest_mac_lo; + + u32 src_mac_hi; + + u16 pppoe_id; + u16 src_mac_lo; + + u16 minfo; + u16 winfo; +}; + +/* software-only entry type */ +struct mtk_foe_bridge { + u8 dest_mac[ETH_ALEN]; + u8 src_mac[ETH_ALEN]; + u16 vlan; + + struct {} key_end; + + u32 ib2; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_ipv4_tuple { + u32 src_ip; + u32 dest_ip; + union { + struct { + u16 dest_port; + u16 src_port; + }; + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; + u32 ports; + }; +}; + +struct mtk_foe_ipv4 { + struct mtk_ipv4_tuple orig; + + u32 ib2; + + struct mtk_ipv4_tuple new; + + u16 timestamp; + u16 _rsv0[3]; + + u32 udf_tsid; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv4_dslite { + struct mtk_ipv4_tuple ip4; + + u32 tunnel_src_ip[4]; + u32 tunnel_dest_ip[4]; + + u8 flow_label[3]; + u8 priority; + + u32 udf_tsid; + + u32 ib2; + + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv6 { + u32 src_ip[4]; + u32 dest_ip[4]; + + union { + struct { + u8 protocol; + u8 _pad[3]; /* fill with 0xa5a5a5 */ + }; /* 3-tuple */ + struct { + u16 dest_port; + u16 src_port; + }; /* 5-tuple */ + u32 ports; + }; + + u32 _rsv[3]; + + u32 udf; + + u32 ib2; + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_ipv6_6rd { + u32 src_ip[4]; + u32 dest_ip[4]; + u16 dest_port; + u16 src_port; + + u32 tunnel_src_ip; + u32 tunnel_dest_ip; + + u16 hdr_csum; + u8 dscp; + u8 ttl; + + u8 flag; + u8 pad; + u8 per_flow_6rd_id; + u8 pad2; + + u32 ib2; + struct mtk_foe_mac_info l2; +}; + +struct mtk_foe_entry { + u32 ib1; + + union { + struct mtk_foe_bridge bridge; + struct mtk_foe_ipv4 ipv4; + struct mtk_foe_ipv4_dslite dslite; + struct mtk_foe_ipv6 ipv6; + struct mtk_foe_ipv6_6rd ipv6_6rd; + u32 data[23]; + }; +}; + +enum { + MTK_PPE_CPU_REASON_TTL_EXCEEDED = 0x02, + MTK_PPE_CPU_REASON_OPTION_HEADER = 0x03, + MTK_PPE_CPU_REASON_NO_FLOW = 0x07, + MTK_PPE_CPU_REASON_IPV4_FRAG = 0x08, + MTK_PPE_CPU_REASON_IPV4_DSLITE_FRAG = 0x09, + MTK_PPE_CPU_REASON_IPV4_DSLITE_NO_TCP_UDP = 0x0a, + MTK_PPE_CPU_REASON_IPV6_6RD_NO_TCP_UDP = 0x0b, + MTK_PPE_CPU_REASON_TCP_FIN_SYN_RST = 0x0c, + MTK_PPE_CPU_REASON_UN_HIT = 0x0d, + MTK_PPE_CPU_REASON_HIT_UNBIND = 0x0e, + MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED = 0x0f, + MTK_PPE_CPU_REASON_HIT_BIND_TCP_FIN = 0x10, + 
MTK_PPE_CPU_REASON_HIT_TTL_1 = 0x11, + MTK_PPE_CPU_REASON_HIT_BIND_VLAN_VIOLATION = 0x12, + MTK_PPE_CPU_REASON_KEEPALIVE_UC_OLD_HDR = 0x13, + MTK_PPE_CPU_REASON_KEEPALIVE_MC_NEW_HDR = 0x14, + MTK_PPE_CPU_REASON_KEEPALIVE_DUP_OLD_HDR = 0x15, + MTK_PPE_CPU_REASON_HIT_BIND_FORCE_CPU = 0x16, + MTK_PPE_CPU_REASON_TUNNEL_OPTION_HEADER = 0x17, + MTK_PPE_CPU_REASON_MULTICAST_TO_CPU = 0x18, + MTK_PPE_CPU_REASON_MULTICAST_TO_GMAC1_CPU = 0x19, + MTK_PPE_CPU_REASON_HIT_PRE_BIND = 0x1a, + MTK_PPE_CPU_REASON_PACKET_SAMPLING = 0x1b, + MTK_PPE_CPU_REASON_EXCEED_MTU = 0x1c, + MTK_PPE_CPU_REASON_PPE_BYPASS = 0x1e, + MTK_PPE_CPU_REASON_INVALID = 0x1f, +}; + +enum { + MTK_FLOW_TYPE_L4, + MTK_FLOW_TYPE_L2, + MTK_FLOW_TYPE_L2_SUBFLOW, +}; + +struct mtk_flow_entry { + union { + struct hlist_node list; + struct { + struct rhash_head l2_node; + struct hlist_head l2_flows; + }; + }; + u8 type; + s8 wed_index; + u8 ppe_index; + u16 hash; + union { + struct mtk_foe_entry data; + struct { + struct mtk_flow_entry *base_flow; + struct hlist_node list; + } l2_data; + }; + struct rhash_head node; + unsigned long cookie; +}; + +struct mtk_ppe { + struct mtk_eth *eth; + struct device *dev; + void __iomem *base; + int version; + char dirname[5]; + + void *foe_table; + dma_addr_t foe_phys; + + u16 foe_check_time[MTK_PPE_ENTRIES]; + struct hlist_head *foe_flow; + + struct rhashtable l2_flows; + + void *acct_table; +}; + +struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, + int version, int index); +void mtk_ppe_deinit(struct mtk_eth *eth); +void mtk_ppe_start(struct mtk_ppe *ppe); +int mtk_ppe_stop(struct mtk_ppe *ppe); + +void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash); + +static inline void +mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash) +{ + u16 now, diff; + + if (!ppe) + return; + + if (hash > MTK_PPE_HASH_MASK) + return; + + now = (u16)jiffies; + diff = now - ppe->foe_check_time[hash]; + if (diff < HZ / 10) + return; + + ppe->foe_check_time[hash] = now; + __mtk_ppe_check_skb(ppe, skb, hash); +} + +int mtk_foe_entry_prepare(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int type, int l4proto, u8 pse_port, u8 *src_mac, + u8 *dest_mac); +int mtk_foe_entry_set_pse_port(struct mtk_eth *eth, + struct mtk_foe_entry *entry, u8 port); +int mtk_foe_entry_set_ipv4_tuple(struct mtk_eth *eth, + struct mtk_foe_entry *entry, bool orig, + __be32 src_addr, __be16 src_port, + __be32 dest_addr, __be16 dest_port); +int mtk_foe_entry_set_ipv6_tuple(struct mtk_eth *eth, + struct mtk_foe_entry *entry, + __be32 *src_addr, __be16 src_port, + __be32 *dest_addr, __be16 dest_port); +int mtk_foe_entry_set_dsa(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int port); +int mtk_foe_entry_set_vlan(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int vid); +int mtk_foe_entry_set_pppoe(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int sid); +int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, + int wdma_idx, int txq, int bss, int wcid); +int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry); +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index); + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c new file mode 100644 index 000000000..391b071bc --- /dev/null +++ 
b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c @@ -0,0 +1,189 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/debugfs.h> +#include "mtk_eth_soc.h" + +struct mtk_flow_addr_info +{ + void *src, *dest; + u16 *src_port, *dest_port; + bool ipv6; +}; + +static const char *mtk_foe_entry_state_str(int state) +{ + static const char * const state_str[] = { + [MTK_FOE_STATE_INVALID] = "INV", + [MTK_FOE_STATE_UNBIND] = "UNB", + [MTK_FOE_STATE_BIND] = "BND", + [MTK_FOE_STATE_FIN] = "FIN", + }; + + if (state >= ARRAY_SIZE(state_str) || !state_str[state]) + return "UNK"; + + return state_str[state]; +} + +static const char *mtk_foe_pkt_type_str(int type) +{ + static const char * const type_str[] = { + [MTK_PPE_PKT_TYPE_IPV4_HNAPT] = "IPv4 5T", + [MTK_PPE_PKT_TYPE_IPV4_ROUTE] = "IPv4 3T", + [MTK_PPE_PKT_TYPE_IPV4_DSLITE] = "DS-LITE", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T] = "IPv6 3T", + [MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T] = "IPv6 5T", + [MTK_PPE_PKT_TYPE_IPV6_6RD] = "6RD", + }; + + if (type >= ARRAY_SIZE(type_str) || !type_str[type]) + return "UNKNOWN"; + + return type_str[type]; +} + +static void +mtk_print_addr(struct seq_file *m, u32 *addr, bool ipv6) +{ + u32 n_addr[4]; + int i; + + if (!ipv6) { + seq_printf(m, "%pI4h", addr); + return; + } + + for (i = 0; i < ARRAY_SIZE(n_addr); i++) + n_addr[i] = htonl(addr[i]); + seq_printf(m, "%pI6", n_addr); +} + +static void +mtk_print_addr_info(struct seq_file *m, struct mtk_flow_addr_info *ai) +{ + mtk_print_addr(m, ai->src, ai->ipv6); + if (ai->src_port) + seq_printf(m, ":%d", *ai->src_port); + seq_printf(m, "->"); + mtk_print_addr(m, ai->dest, ai->ipv6); + if (ai->dest_port) + seq_printf(m, ":%d", *ai->dest_port); +} + +static int +mtk_ppe_debugfs_foe_show(struct seq_file *m, void *private, bool bind) +{ + struct mtk_ppe *ppe = m->private; + int i; + + for (i = 0; i < MTK_PPE_ENTRIES; i++) { + struct mtk_foe_entry *entry = mtk_foe_get_entry(ppe, i); + struct mtk_foe_mac_info *l2; + struct mtk_flow_addr_info ai = {}; + unsigned char h_source[ETH_ALEN]; + unsigned char h_dest[ETH_ALEN]; + int type, state; + u32 ib2; + + + state = FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1); + if (!state) + continue; + + if (bind && state != MTK_FOE_STATE_BIND) + continue; + + type = FIELD_GET(MTK_FOE_IB1_PACKET_TYPE, entry->ib1); + seq_printf(m, "%05x %s %7s", i, + mtk_foe_entry_state_str(state), + mtk_foe_pkt_type_str(type)); + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + ai.src_port = &entry->ipv4.orig.src_port; + ai.dest_port = &entry->ipv4.orig.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + ai.src = &entry->ipv4.orig.src_ip; + ai.dest = &entry->ipv4.orig.dest_ip; + break; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T: + ai.src_port = &entry->ipv6.src_port; + ai.dest_port = &entry->ipv6.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV6_ROUTE_3T: + case MTK_PPE_PKT_TYPE_IPV6_6RD: + ai.src = &entry->ipv6.src_ip; + ai.dest = &entry->ipv6.dest_ip; + ai.ipv6 = true; + break; + } + + seq_printf(m, " orig="); + mtk_print_addr_info(m, &ai); + + switch (type) { + case MTK_PPE_PKT_TYPE_IPV4_HNAPT: + case MTK_PPE_PKT_TYPE_IPV4_DSLITE: + ai.src_port = &entry->ipv4.new.src_port; + ai.dest_port = &entry->ipv4.new.dest_port; + fallthrough; + case MTK_PPE_PKT_TYPE_IPV4_ROUTE: + ai.src = &entry->ipv4.new.src_ip; + ai.dest = &entry->ipv4.new.dest_ip; + seq_printf(m, " new="); + mtk_print_addr_info(m, &ai); + break; + } + + if (type >= 
MTK_PPE_PKT_TYPE_IPV4_DSLITE) { + l2 = &entry->ipv6.l2; + ib2 = entry->ipv6.ib2; + } else { + l2 = &entry->ipv4.l2; + ib2 = entry->ipv4.ib2; + } + + *((__be32 *)h_source) = htonl(l2->src_mac_hi); + *((__be16 *)&h_source[4]) = htons(l2->src_mac_lo); + *((__be32 *)h_dest) = htonl(l2->dest_mac_hi); + *((__be16 *)&h_dest[4]) = htons(l2->dest_mac_lo); + + seq_printf(m, " eth=%pM->%pM etype=%04x" + " vlan=%d,%d ib1=%08x ib2=%08x\n", + h_source, h_dest, ntohs(l2->etype), + l2->vlan1, l2->vlan2, entry->ib1, ib2); + } + + return 0; +} + +static int +mtk_ppe_debugfs_foe_all_show(struct seq_file *m, void *private) +{ + return mtk_ppe_debugfs_foe_show(m, private, false); +} +DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_all); + +static int +mtk_ppe_debugfs_foe_bind_show(struct seq_file *m, void *private) +{ + return mtk_ppe_debugfs_foe_show(m, private, true); +} +DEFINE_SHOW_ATTRIBUTE(mtk_ppe_debugfs_foe_bind); + +int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index) +{ + struct dentry *root; + + snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index); + + root = debugfs_create_dir(ppe->dirname, NULL); + debugfs_create_file("entries", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_all_fops); + debugfs_create_file("bind", S_IRUGO, root, ppe, &mtk_ppe_debugfs_foe_bind_fops); + + return 0; +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c new file mode 100644 index 000000000..6a72687d5 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -0,0 +1,607 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> + */ + +#include <linux/if_ether.h> +#include <linux/rhashtable.h> +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <net/flow_offload.h> +#include <net/pkt_cls.h> +#include <net/dsa.h> +#include "mtk_eth_soc.h" +#include "mtk_wed.h" + +struct mtk_flow_data { + struct ethhdr eth; + + union { + struct { + __be32 src_addr; + __be32 dst_addr; + } v4; + + struct { + struct in6_addr src_addr; + struct in6_addr dst_addr; + } v6; + }; + + __be16 src_port; + __be16 dst_port; + + u16 vlan_in; + + struct { + u16 id; + __be16 proto; + u8 num; + } vlan; + struct { + u16 sid; + u8 num; + } pppoe; +}; + +static const struct rhashtable_params mtk_flow_ht_params = { + .head_offset = offsetof(struct mtk_flow_entry, node), + .key_offset = offsetof(struct mtk_flow_entry, cookie), + .key_len = sizeof(unsigned long), + .automatic_shrinking = true, +}; + +static int +mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe, + struct mtk_flow_data *data, bool egress) +{ + return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress, + data->v4.src_addr, data->src_port, + data->v4.dst_addr, data->dst_port); +} + +static int +mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe, + struct mtk_flow_data *data) +{ + return mtk_foe_entry_set_ipv6_tuple(eth, foe, + data->v6.src_addr.s6_addr32, data->src_port, + data->v6.dst_addr.s6_addr32, data->dst_port); +} + +static void +mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth) +{ + void *dest = eth + act->mangle.offset; + const void *src = &act->mangle.val; + + if (act->mangle.offset > 8) + return; + + if (act->mangle.mask == 0xffff) { + src += 2; + dest += 2; + } + + memcpy(dest, src, act->mangle.mask ? 
2 : 4); +} + +static int +mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info) +{ + struct net_device_path_stack stack; + struct net_device_path *path; + int err; + + if (!dev) + return -ENODEV; + + if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED)) + return -1; + + err = dev_fill_forward_path(dev, addr, &stack); + if (err) + return err; + + path = &stack.path[stack.num_paths - 1]; + if (path->type != DEV_PATH_MTK_WDMA) + return -1; + + info->wdma_idx = path->mtk_wdma.wdma_idx; + info->queue = path->mtk_wdma.queue; + info->bss = path->mtk_wdma.bss; + info->wcid = path->mtk_wdma.wcid; + + return 0; +} + + +static int +mtk_flow_mangle_ports(const struct flow_action_entry *act, + struct mtk_flow_data *data) +{ + u32 val = ntohl(act->mangle.val); + + switch (act->mangle.offset) { + case 0: + if (act->mangle.mask == ~htonl(0xffff)) + data->dst_port = cpu_to_be16(val); + else + data->src_port = cpu_to_be16(val >> 16); + break; + case 2: + data->dst_port = cpu_to_be16(val); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int +mtk_flow_mangle_ipv4(const struct flow_action_entry *act, + struct mtk_flow_data *data) +{ + __be32 *dest; + + switch (act->mangle.offset) { + case offsetof(struct iphdr, saddr): + dest = &data->v4.src_addr; + break; + case offsetof(struct iphdr, daddr): + dest = &data->v4.dst_addr; + break; + default: + return -EINVAL; + } + + memcpy(dest, &act->mangle.val, sizeof(u32)); + + return 0; +} + +static int +mtk_flow_get_dsa_port(struct net_device **dev) +{ +#if IS_ENABLED(CONFIG_NET_DSA) + struct dsa_port *dp; + + dp = dsa_port_from_netdev(*dev); + if (IS_ERR(dp)) + return -ENODEV; + + if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK) + return -ENODEV; + + *dev = dsa_port_to_master(dp); + + return dp->index; +#else + return -ENODEV; +#endif +} + +static int +mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe, + struct net_device *dev, const u8 *dest_mac, + int *wed_index) +{ + struct mtk_wdma_info info = {}; + int pse_port, dsa_port; + + if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) { + mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue, + info.bss, info.wcid); + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { + switch (info.wdma_idx) { + case 0: + pse_port = 8; + break; + case 1: + pse_port = 9; + break; + default: + return -EINVAL; + } + } else { + pse_port = 3; + } + *wed_index = info.wdma_idx; + goto out; + } + + dsa_port = mtk_flow_get_dsa_port(&dev); + if (dsa_port >= 0) + mtk_foe_entry_set_dsa(eth, foe, dsa_port); + + if (dev == eth->netdev[0]) + pse_port = 1; + else if (dev == eth->netdev[1]) + pse_port = 2; + else + return -EOPNOTSUPP; + +out: + mtk_foe_entry_set_pse_port(eth, foe, pse_port); + + return 0; +} + +static int +mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct flow_action_entry *act; + struct mtk_flow_data data = {}; + struct mtk_foe_entry foe; + struct net_device *odev = NULL; + struct mtk_flow_entry *entry; + int offload_type = 0; + int wed_index = -1; + u16 addr_type = 0; + u8 l4proto = 0; + int err = 0; + int i; + + if (rhashtable_lookup(ð->flow_table, &f->cookie, mtk_flow_ht_params)) + return -EEXIST; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) { + struct flow_match_meta match; + + flow_rule_match_meta(rule, &match); + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) { + struct flow_match_control 
match; + + flow_rule_match_control(rule, &match); + addr_type = match.key->addr_type; + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + flow_rule_match_basic(rule, &match); + l4proto = match.key->ip_proto; + } else { + return -EOPNOTSUPP; + } + + switch (addr_type) { + case 0: + offload_type = MTK_PPE_PKT_TYPE_BRIDGE; + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { + struct flow_match_eth_addrs match; + + flow_rule_match_eth_addrs(rule, &match); + memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN); + memcpy(data.eth.h_source, match.key->src, ETH_ALEN); + } else { + return -EOPNOTSUPP; + } + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); + + if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan_in = match.key->vlan_id; + } + break; + case FLOW_DISSECTOR_KEY_IPV4_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT; + break; + case FLOW_DISSECTOR_KEY_IPV6_ADDRS: + offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T; + break; + default: + return -EOPNOTSUPP; + } + + flow_action_for_each(i, act, &rule->action) { + switch (act->id) { + case FLOW_ACTION_MANGLE: + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH) + mtk_flow_offload_mangle_eth(act, &data.eth); + break; + case FLOW_ACTION_REDIRECT: + odev = act->dev; + break; + case FLOW_ACTION_CSUM: + break; + case FLOW_ACTION_VLAN_PUSH: + if (data.vlan.num == 1 || + act->vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + data.vlan.id = act->vlan.vid; + data.vlan.proto = act->vlan.proto; + data.vlan.num++; + break; + case FLOW_ACTION_VLAN_POP: + break; + case FLOW_ACTION_PPPOE_PUSH: + if (data.pppoe.num == 1) + return -EOPNOTSUPP; + + data.pppoe.sid = act->pppoe.sid; + data.pppoe.num++; + break; + default: + return -EOPNOTSUPP; + } + } + + if (!is_valid_ether_addr(data.eth.h_source) || + !is_valid_ether_addr(data.eth.h_dest)) + return -EINVAL; + + err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0, + data.eth.h_source, data.eth.h_dest); + if (err) + return err; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { + struct flow_match_ports ports; + + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + + flow_rule_match_ports(rule, &ports); + data.src_port = ports.key->src; + data.dst_port = ports.key->dst; + } else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) { + return -EOPNOTSUPP; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + struct flow_match_ipv4_addrs addrs; + + flow_rule_match_ipv4_addrs(rule, &addrs); + + data.v4.src_addr = addrs.key->src; + data.v4.dst_addr = addrs.key->dst; + + mtk_flow_set_ipv4_addr(eth, &foe, &data, false); + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) { + struct flow_match_ipv6_addrs addrs; + + flow_rule_match_ipv6_addrs(rule, &addrs); + + data.v6.src_addr = addrs.key->src; + data.v6.dst_addr = addrs.key->dst; + + mtk_flow_set_ipv6_addr(eth, &foe, &data); + } + + flow_action_for_each(i, act, &rule->action) { + if (act->id != FLOW_ACTION_MANGLE) + continue; + + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + return -EOPNOTSUPP; + + switch (act->mangle.htype) { + case FLOW_ACT_MANGLE_HDR_TYPE_TCP: + case FLOW_ACT_MANGLE_HDR_TYPE_UDP: + err = mtk_flow_mangle_ports(act, &data); + break; + case FLOW_ACT_MANGLE_HDR_TYPE_IP4: + err = mtk_flow_mangle_ipv4(act, &data); + 
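			/* The rewritten IPv4 addresses and L4 ports collected
			 * in 'data' by the mangle handlers are committed to
			 * the FOE entry only after this loop, via
			 * mtk_flow_set_ipv4_addr(eth, &foe, &data, true),
			 * which fills the egress ("new") tuple. IPv6 address
			 * rewrites hit the default case of this switch and
			 * are rejected with -EOPNOTSUPP.
			 */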
break; + case FLOW_ACT_MANGLE_HDR_TYPE_ETH: + /* handled earlier */ + break; + default: + return -EOPNOTSUPP; + } + + if (err) + return err; + } + + if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { + err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true); + if (err) + return err; + } + + if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE) + foe.bridge.vlan = data.vlan_in; + + if (data.vlan.num == 1) { + if (data.vlan.proto != htons(ETH_P_8021Q)) + return -EOPNOTSUPP; + + mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id); + } + if (data.pppoe.num == 1) + mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid); + + err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest, + &wed_index); + if (err) + return err; + + if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0) + return err; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->cookie = f->cookie; + memcpy(&entry->data, &foe, sizeof(entry->data)); + entry->wed_index = wed_index; + + err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry); + if (err < 0) + goto free; + + err = rhashtable_insert_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (err < 0) + goto clear; + + return 0; + +clear: + mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry); +free: + kfree(entry); + if (wed_index >= 0) + mtk_wed_flow_remove(wed_index); + return err; +} + +static int +mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct mtk_flow_entry *entry; + + entry = rhashtable_lookup(ð->flow_table, &f->cookie, + mtk_flow_ht_params); + if (!entry) + return -ENOENT; + + mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry); + rhashtable_remove_fast(ð->flow_table, &entry->node, + mtk_flow_ht_params); + if (entry->wed_index >= 0) + mtk_wed_flow_remove(entry->wed_index); + kfree(entry); + + return 0; +} + +static int +mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f) +{ + struct mtk_flow_entry *entry; + u32 idle; + + entry = rhashtable_lookup(ð->flow_table, &f->cookie, + mtk_flow_ht_params); + if (!entry) + return -ENOENT; + + idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry); + f->stats.lastused = jiffies - idle * HZ; + + return 0; +} + +static DEFINE_MUTEX(mtk_flow_offload_mutex); + +static int +mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) +{ + struct flow_cls_offload *cls = type_data; + struct net_device *dev = cb_priv; + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + int err; + + if (!tc_can_offload(dev)) + return -EOPNOTSUPP; + + if (type != TC_SETUP_CLSFLOWER) + return -EOPNOTSUPP; + + mutex_lock(&mtk_flow_offload_mutex); + switch (cls->command) { + case FLOW_CLS_REPLACE: + err = mtk_flow_offload_replace(eth, cls); + break; + case FLOW_CLS_DESTROY: + err = mtk_flow_offload_destroy(eth, cls); + break; + case FLOW_CLS_STATS: + err = mtk_flow_offload_stats(eth, cls); + break; + default: + err = -EOPNOTSUPP; + break; + } + mutex_unlock(&mtk_flow_offload_mutex); + + return err; +} + +static int +mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f) +{ + struct mtk_mac *mac = netdev_priv(dev); + struct mtk_eth *eth = mac->hw; + static LIST_HEAD(block_cb_list); + struct flow_block_cb *block_cb; + flow_setup_cb_t *cb; + + if (!eth->soc->offload_version) + return -EOPNOTSUPP; + + if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + cb = mtk_eth_setup_tc_block_cb; + f->driver_block_list = &block_cb_list; + + switch 
(f->command) { + case FLOW_BLOCK_BIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (block_cb) { + flow_block_cb_incref(block_cb); + return 0; + } + block_cb = flow_block_cb_alloc(cb, dev, dev, NULL); + if (IS_ERR(block_cb)) + return PTR_ERR(block_cb); + + flow_block_cb_incref(block_cb); + flow_block_cb_add(block_cb, f); + list_add_tail(&block_cb->driver_list, &block_cb_list); + return 0; + case FLOW_BLOCK_UNBIND: + block_cb = flow_block_cb_lookup(f->block, cb, dev); + if (!block_cb) + return -ENOENT; + + if (!flow_block_cb_decref(block_cb)) { + flow_block_cb_remove(block_cb, f); + list_del(&block_cb->driver_list); + } + return 0; + default: + return -EOPNOTSUPP; + } +} + +int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, + void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + case TC_SETUP_FT: + return mtk_eth_setup_tc_block(dev, type_data); + default: + return -EOPNOTSUPP; + } +} + +int mtk_eth_offload_init(struct mtk_eth *eth) +{ + return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); +} diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_regs.h b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h new file mode 100644 index 000000000..59596d823 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_ppe_regs.h @@ -0,0 +1,152 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_PPE_REGS_H +#define __MTK_PPE_REGS_H + +#define MTK_PPE_GLO_CFG 0x200 +#define MTK_PPE_GLO_CFG_EN BIT(0) +#define MTK_PPE_GLO_CFG_TSID_EN BIT(1) +#define MTK_PPE_GLO_CFG_IP4_L4_CS_DROP BIT(2) +#define MTK_PPE_GLO_CFG_IP4_CS_DROP BIT(3) +#define MTK_PPE_GLO_CFG_TTL0_DROP BIT(4) +#define MTK_PPE_GLO_CFG_PPE_BSWAP BIT(5) +#define MTK_PPE_GLO_CFG_PSE_HASH_OFS BIT(6) +#define MTK_PPE_GLO_CFG_MCAST_TB_EN BIT(7) +#define MTK_PPE_GLO_CFG_FLOW_DROP_KA BIT(8) +#define MTK_PPE_GLO_CFG_FLOW_DROP_UPDATE BIT(9) +#define MTK_PPE_GLO_CFG_UDP_LITE_EN BIT(10) +#define MTK_PPE_GLO_CFG_UDP_LEN_DROP BIT(11) +#define MTK_PPE_GLO_CFG_MCAST_ENTRIES GNEMASK(13, 12) +#define MTK_PPE_GLO_CFG_BUSY BIT(31) + +#define MTK_PPE_FLOW_CFG 0x204 +#define MTK_PPE_MD_TOAP_BYP_CRSN0 BIT(1) +#define MTK_PPE_MD_TOAP_BYP_CRSN1 BIT(2) +#define MTK_PPE_MD_TOAP_BYP_CRSN2 BIT(3) +#define MTK_PPE_FLOW_CFG_IP4_TCP_FRAG BIT(6) +#define MTK_PPE_FLOW_CFG_IP4_UDP_FRAG BIT(7) +#define MTK_PPE_FLOW_CFG_IP6_3T_ROUTE BIT(8) +#define MTK_PPE_FLOW_CFG_IP6_5T_ROUTE BIT(9) +#define MTK_PPE_FLOW_CFG_IP6_6RD BIT(10) +#define MTK_PPE_FLOW_CFG_IP4_NAT BIT(12) +#define MTK_PPE_FLOW_CFG_IP4_NAPT BIT(13) +#define MTK_PPE_FLOW_CFG_IP4_DSLITE BIT(14) +#define MTK_PPE_FLOW_CFG_L2_BRIDGE BIT(15) +#define MTK_PPE_FLOW_CFG_IP_PROTO_BLACKLIST BIT(16) +#define MTK_PPE_FLOW_CFG_IP4_NAT_FRAG BIT(17) +#define MTK_PPE_FLOW_CFG_IP4_HASH_FLOW_LABEL BIT(18) +#define MTK_PPE_FLOW_CFG_IP4_HASH_GRE_KEY BIT(19) +#define MTK_PPE_FLOW_CFG_IP6_HASH_GRE_KEY BIT(20) + +#define MTK_PPE_IP_PROTO_CHK 0x208 +#define MTK_PPE_IP_PROTO_CHK_IPV4 GENMASK(15, 0) +#define MTK_PPE_IP_PROTO_CHK_IPV6 GENMASK(31, 16) + +#define MTK_PPE_TB_CFG 0x21c +#define MTK_PPE_TB_CFG_ENTRY_NUM GENMASK(2, 0) +#define MTK_PPE_TB_CFG_ENTRY_80B BIT(3) +#define MTK_PPE_TB_CFG_SEARCH_MISS GENMASK(5, 4) +#define MTK_PPE_TB_CFG_AGE_PREBIND BIT(6) +#define MTK_PPE_TB_CFG_AGE_NON_L4 BIT(7) +#define MTK_PPE_TB_CFG_AGE_UNBIND BIT(8) +#define MTK_PPE_TB_CFG_AGE_TCP BIT(9) +#define MTK_PPE_TB_CFG_AGE_UDP BIT(10) +#define MTK_PPE_TB_CFG_AGE_TCP_FIN BIT(11) +#define MTK_PPE_TB_CFG_KEEPALIVE GENMASK(13, 12) +#define MTK_PPE_TB_CFG_HASH_MODE 
GENMASK(15, 14) +#define MTK_PPE_TB_CFG_SCAN_MODE GENMASK(17, 16) +#define MTK_PPE_TB_CFG_HASH_DEBUG GENMASK(19, 18) +#define MTK_PPE_TB_CFG_INFO_SEL BIT(20) + +enum { + MTK_PPE_SCAN_MODE_DISABLED, + MTK_PPE_SCAN_MODE_CHECK_AGE, + MTK_PPE_SCAN_MODE_KEEPALIVE_AGE, +}; + +enum { + MTK_PPE_KEEPALIVE_DISABLE, + MTK_PPE_KEEPALIVE_UNICAST_CPU, + MTK_PPE_KEEPALIVE_DUP_CPU = 3, +}; + +enum { + MTK_PPE_SEARCH_MISS_ACTION_DROP, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD = 2, + MTK_PPE_SEARCH_MISS_ACTION_FORWARD_BUILD = 3, +}; + +#define MTK_PPE_TB_BASE 0x220 + +#define MTK_PPE_TB_USED 0x224 +#define MTK_PPE_TB_USED_NUM GENMASK(13, 0) + +#define MTK_PPE_BIND_RATE 0x228 +#define MTK_PPE_BIND_RATE_BIND GENMASK(15, 0) +#define MTK_PPE_BIND_RATE_PREBIND GENMASK(31, 16) + +#define MTK_PPE_BIND_LIMIT0 0x22c +#define MTK_PPE_BIND_LIMIT0_QUARTER GENMASK(13, 0) +#define MTK_PPE_BIND_LIMIT0_HALF GENMASK(29, 16) + +#define MTK_PPE_BIND_LIMIT1 0x230 +#define MTK_PPE_BIND_LIMIT1_FULL GENMASK(13, 0) +#define MTK_PPE_BIND_LIMIT1_NON_L4 GENMASK(23, 16) + +#define MTK_PPE_KEEPALIVE 0x234 +#define MTK_PPE_KEEPALIVE_TIME GENMASK(15, 0) +#define MTK_PPE_KEEPALIVE_TIME_TCP GENMASK(23, 16) +#define MTK_PPE_KEEPALIVE_TIME_UDP GENMASK(31, 24) + +#define MTK_PPE_UNBIND_AGE 0x238 +#define MTK_PPE_UNBIND_AGE_MIN_PACKETS GENMASK(31, 16) +#define MTK_PPE_UNBIND_AGE_DELTA GENMASK(7, 0) + +#define MTK_PPE_BIND_AGE0 0x23c +#define MTK_PPE_BIND_AGE0_DELTA_NON_L4 GENMASK(30, 16) +#define MTK_PPE_BIND_AGE0_DELTA_UDP GENMASK(14, 0) + +#define MTK_PPE_BIND_AGE1 0x240 +#define MTK_PPE_BIND_AGE1_DELTA_TCP_FIN GENMASK(30, 16) +#define MTK_PPE_BIND_AGE1_DELTA_TCP GENMASK(14, 0) + +#define MTK_PPE_HASH_SEED 0x244 + +#define MTK_PPE_DEFAULT_CPU_PORT 0x248 +#define MTK_PPE_DEFAULT_CPU_PORT_MASK(_n) (GENMASK(2, 0) << ((_n) * 4)) + +#define MTK_PPE_DEFAULT_CPU_PORT1 0x24c + +#define MTK_PPE_MTU_DROP 0x308 + +#define MTK_PPE_VLAN_MTU0 0x30c +#define MTK_PPE_VLAN_MTU0_NONE GENMASK(13, 0) +#define MTK_PPE_VLAN_MTU0_1TAG GENMASK(29, 16) + +#define MTK_PPE_VLAN_MTU1 0x310 +#define MTK_PPE_VLAN_MTU1_2TAG GENMASK(13, 0) +#define MTK_PPE_VLAN_MTU1_3TAG GENMASK(29, 16) + +#define MTK_PPE_VPM_TPID 0x318 + +#define MTK_PPE_CACHE_CTL 0x320 +#define MTK_PPE_CACHE_CTL_EN BIT(0) +#define MTK_PPE_CACHE_CTL_LOCK_CLR BIT(4) +#define MTK_PPE_CACHE_CTL_REQ BIT(8) +#define MTK_PPE_CACHE_CTL_CLEAR BIT(9) +#define MTK_PPE_CACHE_CTL_CMD GENMASK(13, 12) + +#define MTK_PPE_MIB_CFG 0x334 +#define MTK_PPE_MIB_CFG_EN BIT(0) +#define MTK_PPE_MIB_CFG_RD_CLR BIT(1) + +#define MTK_PPE_MIB_TB_BASE 0x338 + +#define MTK_PPE_MIB_CACHE_CTL 0x350 +#define MTK_PPE_MIB_CACHE_CTL_EN BIT(0) +#define MTK_PPE_MIB_CACHE_CTL_FLUSH BIT(2) + +#define MTK_PPE_SBW_CTRL 0x374 + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c new file mode 100644 index 000000000..736839c84 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (c) 2018-2019 MediaTek Inc. 
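
/* Context for the PCS library below: the phylink_pcs instances it sets up
 * are consumed by the MAC driver (mtk_eth_soc.c) from its mac_select_pcs()
 * callback. The sketch below is illustrative only: example_mac_select_pcs()
 * is hypothetical, assumes the mtk_mac/mtk_eth field layout used elsewhere
 * in this driver (mac->hw, mac->id, eth->sgmii, mac->phylink_config) and a
 * one-to-one SGMII-unit-per-MAC-id mapping; the in-tree callback additionally
 * copes with SoCs that share a single SGMII unit.
 *
 *	static struct phylink_pcs *
 *	example_mac_select_pcs(struct phylink_config *config,
 *			       phy_interface_t interface)
 *	{
 *		struct mtk_mac *mac = container_of(config, struct mtk_mac,
 *						   phylink_config);
 *
 *		switch (interface) {
 *		case PHY_INTERFACE_MODE_SGMII:
 *		case PHY_INTERFACE_MODE_1000BASEX:
 *		case PHY_INTERFACE_MODE_2500BASEX:
 *			return mtk_sgmii_select_pcs(mac->hw->sgmii, mac->id);
 *		default:
 *			return NULL;
 *		}
 *	}
 */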
+ +/* A library for MediaTek SGMII circuit + * + * Author: Sean Wang <sean.wang@mediatek.com> + * + */ + +#include <linux/mfd/syscon.h> +#include <linux/of.h> +#include <linux/phylink.h> +#include <linux/regmap.h> + +#include "mtk_eth_soc.h" + +static struct mtk_pcs *pcs_to_mtk_pcs(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct mtk_pcs, pcs); +} + +/* For SGMII interface mode */ +static int mtk_pcs_setup_mode_an(struct mtk_pcs *mpcs) +{ + unsigned int val; + + /* Setup the link timer and QPHY power up inside SGMIISYS */ + regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, + SGMII_LINK_TIMER_DEFAULT); + + regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); + val |= SGMII_REMOTE_FAULT_DIS; + regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); + + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); + + regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val &= ~SGMII_PHYA_PWD; + regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); + + return 0; + +} + +/* For 1000BASE-X and 2500BASE-X interface modes, which operate at a + * fixed speed. + */ +static int mtk_pcs_setup_mode_force(struct mtk_pcs *mpcs, + phy_interface_t interface) +{ + unsigned int val; + + regmap_read(mpcs->regmap, mpcs->ana_rgc3, &val); + val &= ~RG_PHY_SPEED_MASK; + if (interface == PHY_INTERFACE_MODE_2500BASEX) + val |= RG_PHY_SPEED_3_125G; + regmap_write(mpcs->regmap, mpcs->ana_rgc3, val); + + /* Disable SGMII AN */ + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); + val &= ~SGMII_AN_ENABLE; + regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); + + /* Set the speed etc but leave the duplex unchanged */ + regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); + val &= SGMII_DUPLEX_FULL | ~SGMII_IF_MODE_MASK; + val |= SGMII_SPEED_1000; + regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); + + /* Release PHYA power down state */ + regmap_read(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, &val); + val &= ~SGMII_PHYA_PWD; + regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, val); + + return 0; +} + +static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); + int err = 0; + + /* Setup SGMIISYS with the determined property */ + if (interface != PHY_INTERFACE_MODE_SGMII) + err = mtk_pcs_setup_mode_force(mpcs, interface); + else if (phylink_autoneg_inband(mode)) + err = mtk_pcs_setup_mode_an(mpcs); + + return err; +} + +static void mtk_pcs_restart_an(struct phylink_pcs *pcs) +{ + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); + unsigned int val; + + regmap_read(mpcs->regmap, SGMSYS_PCS_CONTROL_1, &val); + val |= SGMII_AN_RESTART; + regmap_write(mpcs->regmap, SGMSYS_PCS_CONTROL_1, val); +} + +static void mtk_pcs_link_up(struct phylink_pcs *pcs, unsigned int mode, + phy_interface_t interface, int speed, int duplex) +{ + struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); + unsigned int val; + + if (!phy_interface_mode_is_8023z(interface)) + return; + + /* SGMII force duplex setting */ + regmap_read(mpcs->regmap, SGMSYS_SGMII_MODE, &val); + val &= ~SGMII_DUPLEX_FULL; + if (duplex == DUPLEX_FULL) + val |= SGMII_DUPLEX_FULL; + + regmap_write(mpcs->regmap, SGMSYS_SGMII_MODE, val); +} + +static const struct phylink_pcs_ops mtk_pcs_ops = { + .pcs_config = mtk_pcs_config, + .pcs_an_restart = mtk_pcs_restart_an, + .pcs_link_up = mtk_pcs_link_up, +}; + +int mtk_sgmii_init(struct 
mtk_sgmii *ss, struct device_node *r, u32 ana_rgc3) +{ + struct device_node *np; + int i; + + for (i = 0; i < MTK_MAX_DEVS; i++) { + np = of_parse_phandle(r, "mediatek,sgmiisys", i); + if (!np) + break; + + ss->pcs[i].ana_rgc3 = ana_rgc3; + ss->pcs[i].regmap = syscon_node_to_regmap(np); + of_node_put(np); + if (IS_ERR(ss->pcs[i].regmap)) + return PTR_ERR(ss->pcs[i].regmap); + + ss->pcs[i].pcs.ops = &mtk_pcs_ops; + } + + return 0; +} + +struct phylink_pcs *mtk_sgmii_select_pcs(struct mtk_sgmii *ss, int id) +{ + if (!ss->pcs[id].regmap) + return NULL; + + return &ss->pcs[id].pcs; +} diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c new file mode 100644 index 000000000..705035125 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -0,0 +1,1761 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2020 MediaTek Corporation + * Copyright (c) 2020 BayLibre SAS + * + * Author: Bartosz Golaszewski <bgolaszewski@baylibre.com> + */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/compiler.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/kernel.h> +#include <linux/mfd/syscon.h> +#include <linux/mii.h> +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/regmap.h> +#include <linux/skbuff.h> +#include <linux/spinlock.h> + +#define MTK_STAR_DRVNAME "mtk_star_emac" + +#define MTK_STAR_WAIT_TIMEOUT 300 +#define MTK_STAR_MAX_FRAME_SIZE 1514 +#define MTK_STAR_SKB_ALIGNMENT 16 +#define MTK_STAR_HASHTABLE_MC_LIMIT 256 +#define MTK_STAR_HASHTABLE_SIZE_MAX 512 +#define MTK_STAR_DESC_NEEDED (MAX_SKB_FRAGS + 4) + +/* Normally we'd use NET_IP_ALIGN but on arm64 its value is 0 and it doesn't + * work for this controller. 
+ */ +#define MTK_STAR_IP_ALIGN 2 + +static const char *const mtk_star_clk_names[] = { "core", "reg", "trans" }; +#define MTK_STAR_NCLKS ARRAY_SIZE(mtk_star_clk_names) + +/* PHY Control Register 0 */ +#define MTK_STAR_REG_PHY_CTRL0 0x0000 +#define MTK_STAR_BIT_PHY_CTRL0_WTCMD BIT(13) +#define MTK_STAR_BIT_PHY_CTRL0_RDCMD BIT(14) +#define MTK_STAR_BIT_PHY_CTRL0_RWOK BIT(15) +#define MTK_STAR_MSK_PHY_CTRL0_PREG GENMASK(12, 8) +#define MTK_STAR_OFF_PHY_CTRL0_PREG 8 +#define MTK_STAR_MSK_PHY_CTRL0_RWDATA GENMASK(31, 16) +#define MTK_STAR_OFF_PHY_CTRL0_RWDATA 16 + +/* PHY Control Register 1 */ +#define MTK_STAR_REG_PHY_CTRL1 0x0004 +#define MTK_STAR_BIT_PHY_CTRL1_LINK_ST BIT(0) +#define MTK_STAR_BIT_PHY_CTRL1_AN_EN BIT(8) +#define MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD 9 +#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M 0x00 +#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M 0x01 +#define MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M 0x02 +#define MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX BIT(11) +#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX BIT(12) +#define MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX BIT(13) + +/* MAC Configuration Register */ +#define MTK_STAR_REG_MAC_CFG 0x0008 +#define MTK_STAR_OFF_MAC_CFG_IPG 10 +#define MTK_STAR_VAL_MAC_CFG_IPG_96BIT GENMASK(4, 0) +#define MTK_STAR_BIT_MAC_CFG_MAXLEN_1522 BIT(16) +#define MTK_STAR_BIT_MAC_CFG_AUTO_PAD BIT(19) +#define MTK_STAR_BIT_MAC_CFG_CRC_STRIP BIT(20) +#define MTK_STAR_BIT_MAC_CFG_VLAN_STRIP BIT(22) +#define MTK_STAR_BIT_MAC_CFG_NIC_PD BIT(31) + +/* Flow-Control Configuration Register */ +#define MTK_STAR_REG_FC_CFG 0x000c +#define MTK_STAR_BIT_FC_CFG_BP_EN BIT(7) +#define MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR BIT(8) +#define MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH 16 +#define MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH GENMASK(27, 16) +#define MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K 0x800 + +/* ARL Configuration Register */ +#define MTK_STAR_REG_ARL_CFG 0x0010 +#define MTK_STAR_BIT_ARL_CFG_HASH_ALG BIT(0) +#define MTK_STAR_BIT_ARL_CFG_MISC_MODE BIT(4) + +/* MAC High and Low Bytes Registers */ +#define MTK_STAR_REG_MY_MAC_H 0x0014 +#define MTK_STAR_REG_MY_MAC_L 0x0018 + +/* Hash Table Control Register */ +#define MTK_STAR_REG_HASH_CTRL 0x001c +#define MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR GENMASK(8, 0) +#define MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA BIT(12) +#define MTK_STAR_BIT_HASH_CTRL_ACC_CMD BIT(13) +#define MTK_STAR_BIT_HASH_CTRL_CMD_START BIT(14) +#define MTK_STAR_BIT_HASH_CTRL_BIST_OK BIT(16) +#define MTK_STAR_BIT_HASH_CTRL_BIST_DONE BIT(17) +#define MTK_STAR_BIT_HASH_CTRL_BIST_EN BIT(31) + +/* TX DMA Control Register */ +#define MTK_STAR_REG_TX_DMA_CTRL 0x0034 +#define MTK_STAR_BIT_TX_DMA_CTRL_START BIT(0) +#define MTK_STAR_BIT_TX_DMA_CTRL_STOP BIT(1) +#define MTK_STAR_BIT_TX_DMA_CTRL_RESUME BIT(2) + +/* RX DMA Control Register */ +#define MTK_STAR_REG_RX_DMA_CTRL 0x0038 +#define MTK_STAR_BIT_RX_DMA_CTRL_START BIT(0) +#define MTK_STAR_BIT_RX_DMA_CTRL_STOP BIT(1) +#define MTK_STAR_BIT_RX_DMA_CTRL_RESUME BIT(2) + +/* DMA Address Registers */ +#define MTK_STAR_REG_TX_DPTR 0x003c +#define MTK_STAR_REG_RX_DPTR 0x0040 +#define MTK_STAR_REG_TX_BASE_ADDR 0x0044 +#define MTK_STAR_REG_RX_BASE_ADDR 0x0048 + +/* Interrupt Status Register */ +#define MTK_STAR_REG_INT_STS 0x0050 +#define MTK_STAR_REG_INT_STS_PORT_STS_CHG BIT(2) +#define MTK_STAR_REG_INT_STS_MIB_CNT_TH BIT(3) +#define MTK_STAR_BIT_INT_STS_FNRC BIT(6) +#define MTK_STAR_BIT_INT_STS_TNTC BIT(8) + +/* Interrupt Mask Register */ +#define MTK_STAR_REG_INT_MASK 0x0054 +#define MTK_STAR_BIT_INT_MASK_FNRC BIT(6) + +/* Delay-Macro Register */ 
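
/* Aside on the PHY_CTRL0 register defined above: the mdiobus helpers further
 * down in this file drive MDIO through it by clearing the RWOK completion
 * flag, issuing RDCMD/WTCMD with the register number in the PREG field,
 * polling RWOK, and then fetching the result from RWDATA. The condensed read
 * sketch below is illustrative only ('regs' stands in for the driver's
 * regmap):
 *
 *	static int example_mdio_read(struct regmap *regs, int regnum)
 *	{
 *		unsigned int val;
 *		int ret;
 *
 *		regmap_write(regs, MTK_STAR_REG_PHY_CTRL0,
 *			     MTK_STAR_BIT_PHY_CTRL0_RWOK);
 *		regmap_write(regs, MTK_STAR_REG_PHY_CTRL0,
 *			     MTK_STAR_BIT_PHY_CTRL0_RDCMD |
 *			     ((regnum << MTK_STAR_OFF_PHY_CTRL0_PREG) &
 *			      MTK_STAR_MSK_PHY_CTRL0_PREG));
 *		ret = regmap_read_poll_timeout(regs, MTK_STAR_REG_PHY_CTRL0,
 *					       val,
 *					       val & MTK_STAR_BIT_PHY_CTRL0_RWOK,
 *					       10, MTK_STAR_WAIT_TIMEOUT);
 *		if (ret)
 *			return ret;
 *
 *		regmap_read(regs, MTK_STAR_REG_PHY_CTRL0, &val);
 *		return (val & MTK_STAR_MSK_PHY_CTRL0_RWDATA) >>
 *		       MTK_STAR_OFF_PHY_CTRL0_RWDATA;
 *	}
 */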
+#define MTK_STAR_REG_TEST0 0x0058 +#define MTK_STAR_BIT_INV_RX_CLK BIT(30) +#define MTK_STAR_BIT_INV_TX_CLK BIT(31) + +/* Misc. Config Register */ +#define MTK_STAR_REG_TEST1 0x005c +#define MTK_STAR_BIT_TEST1_RST_HASH_MBIST BIT(31) + +/* Extended Configuration Register */ +#define MTK_STAR_REG_EXT_CFG 0x0060 +#define MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS 16 +#define MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS GENMASK(26, 16) +#define MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K 0x400 + +/* EthSys Configuration Register */ +#define MTK_STAR_REG_SYS_CONF 0x0094 +#define MTK_STAR_BIT_MII_PAD_OUT_ENABLE BIT(0) +#define MTK_STAR_BIT_EXT_MDC_MODE BIT(1) +#define MTK_STAR_BIT_SWC_MII_MODE BIT(2) + +/* MAC Clock Configuration Register */ +#define MTK_STAR_REG_MAC_CLK_CONF 0x00ac +#define MTK_STAR_MSK_MAC_CLK_CONF GENMASK(7, 0) +#define MTK_STAR_BIT_CLK_DIV_10 0x0a +#define MTK_STAR_BIT_CLK_DIV_50 0x32 + +/* Counter registers. */ +#define MTK_STAR_REG_C_RXOKPKT 0x0100 +#define MTK_STAR_REG_C_RXOKBYTE 0x0104 +#define MTK_STAR_REG_C_RXRUNT 0x0108 +#define MTK_STAR_REG_C_RXLONG 0x010c +#define MTK_STAR_REG_C_RXDROP 0x0110 +#define MTK_STAR_REG_C_RXCRC 0x0114 +#define MTK_STAR_REG_C_RXARLDROP 0x0118 +#define MTK_STAR_REG_C_RXVLANDROP 0x011c +#define MTK_STAR_REG_C_RXCSERR 0x0120 +#define MTK_STAR_REG_C_RXPAUSE 0x0124 +#define MTK_STAR_REG_C_TXOKPKT 0x0128 +#define MTK_STAR_REG_C_TXOKBYTE 0x012c +#define MTK_STAR_REG_C_TXPAUSECOL 0x0130 +#define MTK_STAR_REG_C_TXRTY 0x0134 +#define MTK_STAR_REG_C_TXSKIP 0x0138 +#define MTK_STAR_REG_C_TX_ARP 0x013c +#define MTK_STAR_REG_C_RX_RERR 0x01d8 +#define MTK_STAR_REG_C_RX_UNI 0x01dc +#define MTK_STAR_REG_C_RX_MULTI 0x01e0 +#define MTK_STAR_REG_C_RX_BROAD 0x01e4 +#define MTK_STAR_REG_C_RX_ALIGNERR 0x01e8 +#define MTK_STAR_REG_C_TX_UNI 0x01ec +#define MTK_STAR_REG_C_TX_MULTI 0x01f0 +#define MTK_STAR_REG_C_TX_BROAD 0x01f4 +#define MTK_STAR_REG_C_TX_TIMEOUT 0x01f8 +#define MTK_STAR_REG_C_TX_LATECOL 0x01fc +#define MTK_STAR_REG_C_RX_LENGTHERR 0x0214 +#define MTK_STAR_REG_C_RX_TWIST 0x0218 + +/* Ethernet CFG Control */ +#define MTK_PERICFG_REG_NIC_CFG0_CON 0x03c4 +#define MTK_PERICFG_REG_NIC_CFG1_CON 0x03c8 +#define MTK_PERICFG_REG_NIC_CFG_CON_V2 0x0c10 +#define MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF GENMASK(3, 0) +#define MTK_PERICFG_BIT_NIC_CFG_CON_MII 0 +#define MTK_PERICFG_BIT_NIC_CFG_CON_RMII 1 +#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK BIT(0) +#define MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2 BIT(8) + +/* Represents the actual structure of descriptors used by the MAC. We can + * reuse the same structure for both TX and RX - the layout is the same, only + * the flags differ slightly. + */ +struct mtk_star_ring_desc { + /* Contains both the status flags as well as packet length. */ + u32 status; + u32 data_ptr; + u32 vtag; + u32 reserved; +}; + +#define MTK_STAR_DESC_MSK_LEN GENMASK(15, 0) +#define MTK_STAR_DESC_BIT_RX_CRCE BIT(24) +#define MTK_STAR_DESC_BIT_RX_OSIZE BIT(25) +#define MTK_STAR_DESC_BIT_INT BIT(27) +#define MTK_STAR_DESC_BIT_LS BIT(28) +#define MTK_STAR_DESC_BIT_FS BIT(29) +#define MTK_STAR_DESC_BIT_EOR BIT(30) +#define MTK_STAR_DESC_BIT_COWN BIT(31) + +/* Helper structure for storing data read from/written to descriptors in order + * to limit reads from/writes to DMA memory. 
+ */ +struct mtk_star_ring_desc_data { + unsigned int len; + unsigned int flags; + dma_addr_t dma_addr; + struct sk_buff *skb; +}; + +#define MTK_STAR_RING_NUM_DESCS 512 +#define MTK_STAR_TX_THRESH (MTK_STAR_RING_NUM_DESCS / 4) +#define MTK_STAR_NUM_TX_DESCS MTK_STAR_RING_NUM_DESCS +#define MTK_STAR_NUM_RX_DESCS MTK_STAR_RING_NUM_DESCS +#define MTK_STAR_NUM_DESCS_TOTAL (MTK_STAR_RING_NUM_DESCS * 2) +#define MTK_STAR_DMA_SIZE \ + (MTK_STAR_NUM_DESCS_TOTAL * sizeof(struct mtk_star_ring_desc)) + +struct mtk_star_ring { + struct mtk_star_ring_desc *descs; + struct sk_buff *skbs[MTK_STAR_RING_NUM_DESCS]; + dma_addr_t dma_addrs[MTK_STAR_RING_NUM_DESCS]; + unsigned int head; + unsigned int tail; +}; + +struct mtk_star_compat { + int (*set_interface_mode)(struct net_device *ndev); + unsigned char bit_clk_div; +}; + +struct mtk_star_priv { + struct net_device *ndev; + + struct regmap *regs; + struct regmap *pericfg; + + struct clk_bulk_data clks[MTK_STAR_NCLKS]; + + void *ring_base; + struct mtk_star_ring_desc *descs_base; + dma_addr_t dma_addr; + struct mtk_star_ring tx_ring; + struct mtk_star_ring rx_ring; + + struct mii_bus *mii; + struct napi_struct tx_napi; + struct napi_struct rx_napi; + + struct device_node *phy_node; + phy_interface_t phy_intf; + struct phy_device *phydev; + unsigned int link; + int speed; + int duplex; + int pause; + bool rmii_rxc; + bool rx_inv; + bool tx_inv; + + const struct mtk_star_compat *compat_data; + + /* Protects against concurrent descriptor access. */ + spinlock_t lock; + + struct rtnl_link_stats64 stats; +}; + +static struct device *mtk_star_get_dev(struct mtk_star_priv *priv) +{ + return priv->ndev->dev.parent; +} + +static const struct regmap_config mtk_star_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .disable_locking = true, +}; + +static void mtk_star_ring_init(struct mtk_star_ring *ring, + struct mtk_star_ring_desc *descs) +{ + memset(ring, 0, sizeof(*ring)); + ring->descs = descs; + ring->head = 0; + ring->tail = 0; +} + +static int mtk_star_ring_pop_tail(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data) +{ + struct mtk_star_ring_desc *desc = &ring->descs[ring->tail]; + unsigned int status; + + status = READ_ONCE(desc->status); + dma_rmb(); /* Make sure we read the status bits before checking it. */ + + if (!(status & MTK_STAR_DESC_BIT_COWN)) + return -1; + + desc_data->len = status & MTK_STAR_DESC_MSK_LEN; + desc_data->flags = status & ~MTK_STAR_DESC_MSK_LEN; + desc_data->dma_addr = ring->dma_addrs[ring->tail]; + desc_data->skb = ring->skbs[ring->tail]; + + ring->dma_addrs[ring->tail] = 0; + ring->skbs[ring->tail] = NULL; + + status &= MTK_STAR_DESC_BIT_COWN | MTK_STAR_DESC_BIT_EOR; + + WRITE_ONCE(desc->data_ptr, 0); + WRITE_ONCE(desc->status, status); + + ring->tail = (ring->tail + 1) % MTK_STAR_RING_NUM_DESCS; + + return 0; +} + +static void mtk_star_ring_push_head(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data, + unsigned int flags) +{ + struct mtk_star_ring_desc *desc = &ring->descs[ring->head]; + unsigned int status; + + status = READ_ONCE(desc->status); + + ring->skbs[ring->head] = desc_data->skb; + ring->dma_addrs[ring->head] = desc_data->dma_addr; + + status |= desc_data->len; + if (flags) + status |= flags; + + WRITE_ONCE(desc->data_ptr, desc_data->dma_addr); + WRITE_ONCE(desc->status, status); + status &= ~MTK_STAR_DESC_BIT_COWN; + /* Flush previous modifications before ownership change. 
*/ + dma_wmb(); + WRITE_ONCE(desc->status, status); + + ring->head = (ring->head + 1) % MTK_STAR_RING_NUM_DESCS; +} + +static void +mtk_star_ring_push_head_rx(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data) +{ + mtk_star_ring_push_head(ring, desc_data, 0); +} + +static void +mtk_star_ring_push_head_tx(struct mtk_star_ring *ring, + struct mtk_star_ring_desc_data *desc_data) +{ + static const unsigned int flags = MTK_STAR_DESC_BIT_FS | + MTK_STAR_DESC_BIT_LS | + MTK_STAR_DESC_BIT_INT; + + mtk_star_ring_push_head(ring, desc_data, flags); +} + +static unsigned int mtk_star_tx_ring_avail(struct mtk_star_ring *ring) +{ + u32 avail; + + if (ring->tail > ring->head) + avail = ring->tail - ring->head - 1; + else + avail = MTK_STAR_RING_NUM_DESCS - ring->head + ring->tail - 1; + + return avail; +} + +static dma_addr_t mtk_star_dma_map_rx(struct mtk_star_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = mtk_star_get_dev(priv); + + /* Data pointer for the RX DMA descriptor must be aligned to 4N + 2. */ + return dma_map_single(dev, skb_tail_pointer(skb) - 2, + skb_tailroom(skb), DMA_FROM_DEVICE); +} + +static void mtk_star_dma_unmap_rx(struct mtk_star_priv *priv, + struct mtk_star_ring_desc_data *desc_data) +{ + struct device *dev = mtk_star_get_dev(priv); + + dma_unmap_single(dev, desc_data->dma_addr, + skb_tailroom(desc_data->skb), DMA_FROM_DEVICE); +} + +static dma_addr_t mtk_star_dma_map_tx(struct mtk_star_priv *priv, + struct sk_buff *skb) +{ + struct device *dev = mtk_star_get_dev(priv); + + return dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); +} + +static void mtk_star_dma_unmap_tx(struct mtk_star_priv *priv, + struct mtk_star_ring_desc_data *desc_data) +{ + struct device *dev = mtk_star_get_dev(priv); + + return dma_unmap_single(dev, desc_data->dma_addr, + skb_headlen(desc_data->skb), DMA_TO_DEVICE); +} + +static void mtk_star_nic_disable_pd(struct mtk_star_priv *priv) +{ + regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG, + MTK_STAR_BIT_MAC_CFG_NIC_PD); +} + +static void mtk_star_enable_dma_irq(struct mtk_star_priv *priv, + bool rx, bool tx) +{ + u32 value; + + regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value); + + if (tx) + value &= ~MTK_STAR_BIT_INT_STS_TNTC; + if (rx) + value &= ~MTK_STAR_BIT_INT_STS_FNRC; + + regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value); +} + +static void mtk_star_disable_dma_irq(struct mtk_star_priv *priv, + bool rx, bool tx) +{ + u32 value; + + regmap_read(priv->regs, MTK_STAR_REG_INT_MASK, &value); + + if (tx) + value |= MTK_STAR_BIT_INT_STS_TNTC; + if (rx) + value |= MTK_STAR_BIT_INT_STS_FNRC; + + regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, value); +} + +/* Unmask the three interrupts we care about, mask all others. 
*/ +static void mtk_star_intr_enable(struct mtk_star_priv *priv) +{ + unsigned int val = MTK_STAR_BIT_INT_STS_TNTC | + MTK_STAR_BIT_INT_STS_FNRC | + MTK_STAR_REG_INT_STS_MIB_CNT_TH; + + regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~val); +} + +static void mtk_star_intr_disable(struct mtk_star_priv *priv) +{ + regmap_write(priv->regs, MTK_STAR_REG_INT_MASK, ~0); +} + +static unsigned int mtk_star_intr_ack_all(struct mtk_star_priv *priv) +{ + unsigned int val; + + regmap_read(priv->regs, MTK_STAR_REG_INT_STS, &val); + regmap_write(priv->regs, MTK_STAR_REG_INT_STS, val); + + return val; +} + +static void mtk_star_dma_init(struct mtk_star_priv *priv) +{ + struct mtk_star_ring_desc *desc; + unsigned int val; + int i; + + priv->descs_base = (struct mtk_star_ring_desc *)priv->ring_base; + + for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) { + desc = &priv->descs_base[i]; + + memset(desc, 0, sizeof(*desc)); + desc->status = MTK_STAR_DESC_BIT_COWN; + if ((i == MTK_STAR_NUM_TX_DESCS - 1) || + (i == MTK_STAR_NUM_DESCS_TOTAL - 1)) + desc->status |= MTK_STAR_DESC_BIT_EOR; + } + + mtk_star_ring_init(&priv->tx_ring, priv->descs_base); + mtk_star_ring_init(&priv->rx_ring, + priv->descs_base + MTK_STAR_NUM_TX_DESCS); + + /* Set DMA pointers. */ + val = (unsigned int)priv->dma_addr; + regmap_write(priv->regs, MTK_STAR_REG_TX_BASE_ADDR, val); + regmap_write(priv->regs, MTK_STAR_REG_TX_DPTR, val); + + val += sizeof(struct mtk_star_ring_desc) * MTK_STAR_NUM_TX_DESCS; + regmap_write(priv->regs, MTK_STAR_REG_RX_BASE_ADDR, val); + regmap_write(priv->regs, MTK_STAR_REG_RX_DPTR, val); +} + +static void mtk_star_dma_start(struct mtk_star_priv *priv) +{ + regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, + MTK_STAR_BIT_TX_DMA_CTRL_START); + regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, + MTK_STAR_BIT_RX_DMA_CTRL_START); +} + +static void mtk_star_dma_stop(struct mtk_star_priv *priv) +{ + regmap_write(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, + MTK_STAR_BIT_TX_DMA_CTRL_STOP); + regmap_write(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, + MTK_STAR_BIT_RX_DMA_CTRL_STOP); +} + +static void mtk_star_dma_disable(struct mtk_star_priv *priv) +{ + int i; + + mtk_star_dma_stop(priv); + + /* Take back all descriptors. 
*/ + for (i = 0; i < MTK_STAR_NUM_DESCS_TOTAL; i++) + priv->descs_base[i].status |= MTK_STAR_DESC_BIT_COWN; +} + +static void mtk_star_dma_resume_rx(struct mtk_star_priv *priv) +{ + regmap_set_bits(priv->regs, MTK_STAR_REG_RX_DMA_CTRL, + MTK_STAR_BIT_RX_DMA_CTRL_RESUME); +} + +static void mtk_star_dma_resume_tx(struct mtk_star_priv *priv) +{ + regmap_set_bits(priv->regs, MTK_STAR_REG_TX_DMA_CTRL, + MTK_STAR_BIT_TX_DMA_CTRL_RESUME); +} + +static void mtk_star_set_mac_addr(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + const u8 *mac_addr = ndev->dev_addr; + unsigned int high, low; + + high = mac_addr[0] << 8 | mac_addr[1] << 0; + low = mac_addr[2] << 24 | mac_addr[3] << 16 | + mac_addr[4] << 8 | mac_addr[5]; + + regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_H, high); + regmap_write(priv->regs, MTK_STAR_REG_MY_MAC_L, low); +} + +static void mtk_star_reset_counters(struct mtk_star_priv *priv) +{ + static const unsigned int counter_regs[] = { + MTK_STAR_REG_C_RXOKPKT, + MTK_STAR_REG_C_RXOKBYTE, + MTK_STAR_REG_C_RXRUNT, + MTK_STAR_REG_C_RXLONG, + MTK_STAR_REG_C_RXDROP, + MTK_STAR_REG_C_RXCRC, + MTK_STAR_REG_C_RXARLDROP, + MTK_STAR_REG_C_RXVLANDROP, + MTK_STAR_REG_C_RXCSERR, + MTK_STAR_REG_C_RXPAUSE, + MTK_STAR_REG_C_TXOKPKT, + MTK_STAR_REG_C_TXOKBYTE, + MTK_STAR_REG_C_TXPAUSECOL, + MTK_STAR_REG_C_TXRTY, + MTK_STAR_REG_C_TXSKIP, + MTK_STAR_REG_C_TX_ARP, + MTK_STAR_REG_C_RX_RERR, + MTK_STAR_REG_C_RX_UNI, + MTK_STAR_REG_C_RX_MULTI, + MTK_STAR_REG_C_RX_BROAD, + MTK_STAR_REG_C_RX_ALIGNERR, + MTK_STAR_REG_C_TX_UNI, + MTK_STAR_REG_C_TX_MULTI, + MTK_STAR_REG_C_TX_BROAD, + MTK_STAR_REG_C_TX_TIMEOUT, + MTK_STAR_REG_C_TX_LATECOL, + MTK_STAR_REG_C_RX_LENGTHERR, + MTK_STAR_REG_C_RX_TWIST, + }; + + unsigned int i, val; + + for (i = 0; i < ARRAY_SIZE(counter_regs); i++) + regmap_read(priv->regs, counter_regs[i], &val); +} + +static void mtk_star_update_stat(struct mtk_star_priv *priv, + unsigned int reg, u64 *stat) +{ + unsigned int val; + + regmap_read(priv->regs, reg, &val); + *stat += val; +} + +/* Try to get as many stats as possible from the internal registers instead + * of tracking them ourselves. + */ +static void mtk_star_update_stats(struct mtk_star_priv *priv) +{ + struct rtnl_link_stats64 *stats = &priv->stats; + + /* OK packets and bytes. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKPKT, &stats->rx_packets); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKPKT, &stats->tx_packets); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXOKBYTE, &stats->rx_bytes); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TXOKBYTE, &stats->tx_bytes); + + /* RX & TX multicast. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_MULTI, &stats->multicast); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_MULTI, &stats->multicast); + + /* Collisions. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_TXPAUSECOL, + &stats->collisions); + mtk_star_update_stat(priv, MTK_STAR_REG_C_TX_LATECOL, + &stats->collisions); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXRUNT, &stats->collisions); + + /* RX Errors. */ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_LENGTHERR, + &stats->rx_length_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXLONG, + &stats->rx_over_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXCRC, &stats->rx_crc_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_ALIGNERR, + &stats->rx_frame_errors); + mtk_star_update_stat(priv, MTK_STAR_REG_C_RXDROP, + &stats->rx_fifo_errors); + /* Sum of the general RX error counter + all of the above. 
*/ + mtk_star_update_stat(priv, MTK_STAR_REG_C_RX_RERR, &stats->rx_errors); + stats->rx_errors += stats->rx_length_errors; + stats->rx_errors += stats->rx_over_errors; + stats->rx_errors += stats->rx_crc_errors; + stats->rx_errors += stats->rx_frame_errors; + stats->rx_errors += stats->rx_fifo_errors; +} + +static struct sk_buff *mtk_star_alloc_skb(struct net_device *ndev) +{ + uintptr_t tail, offset; + struct sk_buff *skb; + + skb = dev_alloc_skb(MTK_STAR_MAX_FRAME_SIZE); + if (!skb) + return NULL; + + /* Align to 16 bytes. */ + tail = (uintptr_t)skb_tail_pointer(skb); + if (tail & (MTK_STAR_SKB_ALIGNMENT - 1)) { + offset = tail & (MTK_STAR_SKB_ALIGNMENT - 1); + skb_reserve(skb, MTK_STAR_SKB_ALIGNMENT - offset); + } + + /* Ensure 16-byte alignment of the skb pointer: eth_type_trans() will + * extract the Ethernet header (14 bytes) so we need two more bytes. + */ + skb_reserve(skb, MTK_STAR_IP_ALIGN); + + return skb; +} + +static int mtk_star_prepare_rx_skbs(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct mtk_star_ring *ring = &priv->rx_ring; + struct device *dev = mtk_star_get_dev(priv); + struct mtk_star_ring_desc *desc; + struct sk_buff *skb; + dma_addr_t dma_addr; + int i; + + for (i = 0; i < MTK_STAR_NUM_RX_DESCS; i++) { + skb = mtk_star_alloc_skb(ndev); + if (!skb) + return -ENOMEM; + + dma_addr = mtk_star_dma_map_rx(priv, skb); + if (dma_mapping_error(dev, dma_addr)) { + dev_kfree_skb(skb); + return -ENOMEM; + } + + desc = &ring->descs[i]; + desc->data_ptr = dma_addr; + desc->status |= skb_tailroom(skb) & MTK_STAR_DESC_MSK_LEN; + desc->status &= ~MTK_STAR_DESC_BIT_COWN; + ring->skbs[i] = skb; + ring->dma_addrs[i] = dma_addr; + } + + return 0; +} + +static void +mtk_star_ring_free_skbs(struct mtk_star_priv *priv, struct mtk_star_ring *ring, + void (*unmap_func)(struct mtk_star_priv *, + struct mtk_star_ring_desc_data *)) +{ + struct mtk_star_ring_desc_data desc_data; + int i; + + for (i = 0; i < MTK_STAR_RING_NUM_DESCS; i++) { + if (!ring->dma_addrs[i]) + continue; + + desc_data.dma_addr = ring->dma_addrs[i]; + desc_data.skb = ring->skbs[i]; + + unmap_func(priv, &desc_data); + dev_kfree_skb(desc_data.skb); + } +} + +static void mtk_star_free_rx_skbs(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->rx_ring; + + mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_rx); +} + +static void mtk_star_free_tx_skbs(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->tx_ring; + + mtk_star_ring_free_skbs(priv, ring, mtk_star_dma_unmap_tx); +} + +/** + * mtk_star_handle_irq - Interrupt Handler. + * @irq: interrupt number. + * @data: pointer to a network interface device structure. + * Description : this is the driver interrupt service routine. + * it mainly handles: + * 1. tx complete interrupt for frame transmission. + * 2. rx complete interrupt for frame reception. + * 3. MAC Management Counter interrupt to avoid counter overflow. 
+ **/ +static irqreturn_t mtk_star_handle_irq(int irq, void *data) +{ + struct net_device *ndev = data; + struct mtk_star_priv *priv = netdev_priv(ndev); + unsigned int intr_status = mtk_star_intr_ack_all(priv); + bool rx, tx; + + rx = (intr_status & MTK_STAR_BIT_INT_STS_FNRC) && + napi_schedule_prep(&priv->rx_napi); + tx = (intr_status & MTK_STAR_BIT_INT_STS_TNTC) && + napi_schedule_prep(&priv->tx_napi); + + if (rx || tx) { + spin_lock(&priv->lock); + /* mask Rx and TX Complete interrupt */ + mtk_star_disable_dma_irq(priv, rx, tx); + spin_unlock(&priv->lock); + + if (rx) + __napi_schedule(&priv->rx_napi); + if (tx) + __napi_schedule(&priv->tx_napi); + } + + /* interrupt is triggered once any counters reach 0x8000000 */ + if (intr_status & MTK_STAR_REG_INT_STS_MIB_CNT_TH) { + mtk_star_update_stats(priv); + mtk_star_reset_counters(priv); + } + + return IRQ_HANDLED; +} + +/* Wait for the completion of any previous command - CMD_START bit must be + * cleared by hardware. + */ +static int mtk_star_hash_wait_cmd_start(struct mtk_star_priv *priv) +{ + unsigned int val; + + return regmap_read_poll_timeout_atomic(priv->regs, + MTK_STAR_REG_HASH_CTRL, val, + !(val & MTK_STAR_BIT_HASH_CTRL_CMD_START), + 10, MTK_STAR_WAIT_TIMEOUT); +} + +static int mtk_star_hash_wait_ok(struct mtk_star_priv *priv) +{ + unsigned int val; + int ret; + + /* Wait for BIST_DONE bit. */ + ret = regmap_read_poll_timeout_atomic(priv->regs, + MTK_STAR_REG_HASH_CTRL, val, + val & MTK_STAR_BIT_HASH_CTRL_BIST_DONE, + 10, MTK_STAR_WAIT_TIMEOUT); + if (ret) + return ret; + + /* Check the BIST_OK bit. */ + if (!regmap_test_bits(priv->regs, MTK_STAR_REG_HASH_CTRL, + MTK_STAR_BIT_HASH_CTRL_BIST_OK)) + return -EIO; + + return 0; +} + +static int mtk_star_set_hashbit(struct mtk_star_priv *priv, + unsigned int hash_addr) +{ + unsigned int val; + int ret; + + ret = mtk_star_hash_wait_cmd_start(priv); + if (ret) + return ret; + + val = hash_addr & MTK_STAR_MSK_HASH_CTRL_HASH_BIT_ADDR; + val |= MTK_STAR_BIT_HASH_CTRL_ACC_CMD; + val |= MTK_STAR_BIT_HASH_CTRL_CMD_START; + val |= MTK_STAR_BIT_HASH_CTRL_BIST_EN; + val |= MTK_STAR_BIT_HASH_CTRL_HASH_BIT_DATA; + regmap_write(priv->regs, MTK_STAR_REG_HASH_CTRL, val); + + return mtk_star_hash_wait_ok(priv); +} + +static int mtk_star_reset_hash_table(struct mtk_star_priv *priv) +{ + int ret; + + ret = mtk_star_hash_wait_cmd_start(priv); + if (ret) + return ret; + + regmap_set_bits(priv->regs, MTK_STAR_REG_HASH_CTRL, + MTK_STAR_BIT_HASH_CTRL_BIST_EN); + regmap_set_bits(priv->regs, MTK_STAR_REG_TEST1, + MTK_STAR_BIT_TEST1_RST_HASH_MBIST); + + return mtk_star_hash_wait_ok(priv); +} + +static void mtk_star_phy_config(struct mtk_star_priv *priv) +{ + unsigned int val; + + if (priv->speed == SPEED_1000) + val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_1000M; + else if (priv->speed == SPEED_100) + val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_100M; + else + val = MTK_STAR_VAL_PHY_CTRL1_FORCE_SPD_10M; + val <<= MTK_STAR_OFF_PHY_CTRL1_FORCE_SPD; + + val |= MTK_STAR_BIT_PHY_CTRL1_AN_EN; + if (priv->pause) { + val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX; + val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX; + val |= MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX; + } else { + val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_RX; + val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_FC_TX; + val &= ~MTK_STAR_BIT_PHY_CTRL1_FORCE_DPX; + } + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL1, val); + + val = MTK_STAR_VAL_FC_CFG_SEND_PAUSE_TH_2K; + val <<= MTK_STAR_OFF_FC_CFG_SEND_PAUSE_TH; + val |= MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR; + regmap_update_bits(priv->regs, 
MTK_STAR_REG_FC_CFG, + MTK_STAR_MSK_FC_CFG_SEND_PAUSE_TH | + MTK_STAR_BIT_FC_CFG_UC_PAUSE_DIR, val); + + val = MTK_STAR_VAL_EXT_CFG_SND_PAUSE_RLS_1K; + val <<= MTK_STAR_OFF_EXT_CFG_SND_PAUSE_RLS; + regmap_update_bits(priv->regs, MTK_STAR_REG_EXT_CFG, + MTK_STAR_MSK_EXT_CFG_SND_PAUSE_RLS, val); +} + +static void mtk_star_adjust_link(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct phy_device *phydev = priv->phydev; + bool new_state = false; + + if (phydev->link) { + if (!priv->link) { + priv->link = phydev->link; + new_state = true; + } + + if (priv->speed != phydev->speed) { + priv->speed = phydev->speed; + new_state = true; + } + + if (priv->pause != phydev->pause) { + priv->pause = phydev->pause; + new_state = true; + } + } else { + if (priv->link) { + priv->link = phydev->link; + new_state = true; + } + } + + if (new_state) { + if (phydev->link) + mtk_star_phy_config(priv); + + phy_print_status(ndev->phydev); + } +} + +static void mtk_star_init_config(struct mtk_star_priv *priv) +{ + unsigned int val; + + val = (MTK_STAR_BIT_MII_PAD_OUT_ENABLE | + MTK_STAR_BIT_EXT_MDC_MODE | + MTK_STAR_BIT_SWC_MII_MODE); + + regmap_write(priv->regs, MTK_STAR_REG_SYS_CONF, val); + regmap_update_bits(priv->regs, MTK_STAR_REG_MAC_CLK_CONF, + MTK_STAR_MSK_MAC_CLK_CONF, + priv->compat_data->bit_clk_div); +} + +static int mtk_star_enable(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + unsigned int val; + int ret; + + mtk_star_nic_disable_pd(priv); + mtk_star_intr_disable(priv); + mtk_star_dma_stop(priv); + + mtk_star_set_mac_addr(ndev); + + /* Configure the MAC */ + val = MTK_STAR_VAL_MAC_CFG_IPG_96BIT; + val <<= MTK_STAR_OFF_MAC_CFG_IPG; + val |= MTK_STAR_BIT_MAC_CFG_MAXLEN_1522; + val |= MTK_STAR_BIT_MAC_CFG_AUTO_PAD; + val |= MTK_STAR_BIT_MAC_CFG_CRC_STRIP; + regmap_write(priv->regs, MTK_STAR_REG_MAC_CFG, val); + + /* Enable Hash Table BIST and reset it */ + ret = mtk_star_reset_hash_table(priv); + if (ret) + return ret; + + /* Setup the hashing algorithm */ + regmap_clear_bits(priv->regs, MTK_STAR_REG_ARL_CFG, + MTK_STAR_BIT_ARL_CFG_HASH_ALG | + MTK_STAR_BIT_ARL_CFG_MISC_MODE); + + /* Don't strip VLAN tags */ + regmap_clear_bits(priv->regs, MTK_STAR_REG_MAC_CFG, + MTK_STAR_BIT_MAC_CFG_VLAN_STRIP); + + /* Setup DMA */ + mtk_star_dma_init(priv); + + ret = mtk_star_prepare_rx_skbs(ndev); + if (ret) + goto err_out; + + /* Request the interrupt */ + ret = request_irq(ndev->irq, mtk_star_handle_irq, + IRQF_TRIGGER_NONE, ndev->name, ndev); + if (ret) + goto err_free_skbs; + + napi_enable(&priv->tx_napi); + napi_enable(&priv->rx_napi); + + mtk_star_intr_ack_all(priv); + mtk_star_intr_enable(priv); + + /* Connect to and start PHY */ + priv->phydev = of_phy_connect(ndev, priv->phy_node, + mtk_star_adjust_link, 0, priv->phy_intf); + if (!priv->phydev) { + netdev_err(ndev, "failed to connect to PHY\n"); + ret = -ENODEV; + goto err_free_irq; + } + + mtk_star_dma_start(priv); + phy_start(priv->phydev); + netif_start_queue(ndev); + + return 0; + +err_free_irq: + napi_disable(&priv->rx_napi); + napi_disable(&priv->tx_napi); + free_irq(ndev->irq, ndev); +err_free_skbs: + mtk_star_free_rx_skbs(priv); +err_out: + return ret; +} + +static void mtk_star_disable(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + + netif_stop_queue(ndev); + napi_disable(&priv->tx_napi); + napi_disable(&priv->rx_napi); + mtk_star_intr_disable(priv); + mtk_star_dma_disable(priv); + mtk_star_intr_ack_all(priv); + phy_stop(priv->phydev); + 
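+	/* Disconnect the PHY and release the IRQ only after DMA and all
+	 * interrupt sources have been disabled above.
+	 */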
phy_disconnect(priv->phydev); + free_irq(ndev->irq, ndev); + mtk_star_free_rx_skbs(priv); + mtk_star_free_tx_skbs(priv); +} + +static int mtk_star_netdev_open(struct net_device *ndev) +{ + return mtk_star_enable(ndev); +} + +static int mtk_star_netdev_stop(struct net_device *ndev) +{ + mtk_star_disable(ndev); + + return 0; +} + +static int mtk_star_netdev_ioctl(struct net_device *ndev, + struct ifreq *req, int cmd) +{ + if (!netif_running(ndev)) + return -EINVAL; + + return phy_mii_ioctl(ndev->phydev, req, cmd); +} + +static int __mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size) +{ + netif_stop_queue(priv->ndev); + + /* Might race with mtk_star_tx_poll, check again */ + smp_mb(); + if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) < size)) + return -EBUSY; + + netif_start_queue(priv->ndev); + + return 0; +} + +static inline int mtk_star_maybe_stop_tx(struct mtk_star_priv *priv, u16 size) +{ + if (likely(mtk_star_tx_ring_avail(&priv->tx_ring) >= size)) + return 0; + + return __mtk_star_maybe_stop_tx(priv, size); +} + +static netdev_tx_t mtk_star_netdev_start_xmit(struct sk_buff *skb, + struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct mtk_star_ring *ring = &priv->tx_ring; + struct device *dev = mtk_star_get_dev(priv); + struct mtk_star_ring_desc_data desc_data; + int nfrags = skb_shinfo(skb)->nr_frags; + + if (unlikely(mtk_star_tx_ring_avail(ring) < nfrags + 1)) { + if (!netif_queue_stopped(ndev)) { + netif_stop_queue(ndev); + /* This is a hard error, log it. */ + pr_err_ratelimited("Tx ring full when queue awake\n"); + } + return NETDEV_TX_BUSY; + } + + desc_data.dma_addr = mtk_star_dma_map_tx(priv, skb); + if (dma_mapping_error(dev, desc_data.dma_addr)) + goto err_drop_packet; + + desc_data.skb = skb; + desc_data.len = skb->len; + mtk_star_ring_push_head_tx(ring, &desc_data); + + netdev_sent_queue(ndev, skb->len); + + mtk_star_maybe_stop_tx(priv, MTK_STAR_DESC_NEEDED); + + mtk_star_dma_resume_tx(priv); + + return NETDEV_TX_OK; + +err_drop_packet: + dev_kfree_skb(skb); + ndev->stats.tx_dropped++; + return NETDEV_TX_OK; +} + +/* Returns the number of bytes sent or a negative number on the first + * descriptor owned by DMA. 
+ */ +static int mtk_star_tx_complete_one(struct mtk_star_priv *priv) +{ + struct mtk_star_ring *ring = &priv->tx_ring; + struct mtk_star_ring_desc_data desc_data; + int ret; + + ret = mtk_star_ring_pop_tail(ring, &desc_data); + if (ret) + return ret; + + mtk_star_dma_unmap_tx(priv, &desc_data); + ret = desc_data.skb->len; + dev_kfree_skb_irq(desc_data.skb); + + return ret; +} + +static int mtk_star_tx_poll(struct napi_struct *napi, int budget) +{ + struct mtk_star_priv *priv = container_of(napi, struct mtk_star_priv, + tx_napi); + int ret = 0, pkts_compl = 0, bytes_compl = 0, count = 0; + struct mtk_star_ring *ring = &priv->tx_ring; + struct net_device *ndev = priv->ndev; + unsigned int head = ring->head; + unsigned int entry = ring->tail; + + while (entry != head && count < (MTK_STAR_RING_NUM_DESCS - 1)) { + ret = mtk_star_tx_complete_one(priv); + if (ret < 0) + break; + + count++; + pkts_compl++; + bytes_compl += ret; + entry = ring->tail; + } + + netdev_completed_queue(ndev, pkts_compl, bytes_compl); + + if (unlikely(netif_queue_stopped(ndev)) && + (mtk_star_tx_ring_avail(ring) > MTK_STAR_TX_THRESH)) + netif_wake_queue(ndev); + + if (napi_complete(napi)) { + spin_lock(&priv->lock); + mtk_star_enable_dma_irq(priv, false, true); + spin_unlock(&priv->lock); + } + + return 0; +} + +static void mtk_star_netdev_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + + mtk_star_update_stats(priv); + + memcpy(stats, &priv->stats, sizeof(*stats)); +} + +static void mtk_star_set_rx_mode(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct netdev_hw_addr *hw_addr; + unsigned int hash_addr, i; + int ret; + + if (ndev->flags & IFF_PROMISC) { + regmap_set_bits(priv->regs, MTK_STAR_REG_ARL_CFG, + MTK_STAR_BIT_ARL_CFG_MISC_MODE); + } else if (netdev_mc_count(ndev) > MTK_STAR_HASHTABLE_MC_LIMIT || + ndev->flags & IFF_ALLMULTI) { + for (i = 0; i < MTK_STAR_HASHTABLE_SIZE_MAX; i++) { + ret = mtk_star_set_hashbit(priv, i); + if (ret) + goto hash_fail; + } + } else { + /* Clear previous settings. */ + ret = mtk_star_reset_hash_table(priv); + if (ret) + goto hash_fail; + + netdev_for_each_mc_addr(hw_addr, ndev) { + hash_addr = (hw_addr->addr[0] & 0x01) << 8; + hash_addr += hw_addr->addr[5]; + ret = mtk_star_set_hashbit(priv, hash_addr); + if (ret) + goto hash_fail; + } + } + + return; + +hash_fail: + if (ret == -ETIMEDOUT) + netdev_err(ndev, "setting hash bit timed out\n"); + else + /* Should be -EIO */ + netdev_err(ndev, "unable to set hash bit"); +} + +static const struct net_device_ops mtk_star_netdev_ops = { + .ndo_open = mtk_star_netdev_open, + .ndo_stop = mtk_star_netdev_stop, + .ndo_start_xmit = mtk_star_netdev_start_xmit, + .ndo_get_stats64 = mtk_star_netdev_get_stats64, + .ndo_set_rx_mode = mtk_star_set_rx_mode, + .ndo_eth_ioctl = mtk_star_netdev_ioctl, + .ndo_set_mac_address = eth_mac_addr, + .ndo_validate_addr = eth_validate_addr, +}; + +static void mtk_star_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strscpy(info->driver, MTK_STAR_DRVNAME, sizeof(info->driver)); +} + +/* TODO Add ethtool stats. 
*/ +static const struct ethtool_ops mtk_star_ethtool_ops = { + .get_drvinfo = mtk_star_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static int mtk_star_rx(struct mtk_star_priv *priv, int budget) +{ + struct mtk_star_ring *ring = &priv->rx_ring; + struct device *dev = mtk_star_get_dev(priv); + struct mtk_star_ring_desc_data desc_data; + struct net_device *ndev = priv->ndev; + struct sk_buff *curr_skb, *new_skb; + dma_addr_t new_dma_addr; + int ret, count = 0; + + while (count < budget) { + ret = mtk_star_ring_pop_tail(ring, &desc_data); + if (ret) + return -1; + + curr_skb = desc_data.skb; + + if ((desc_data.flags & MTK_STAR_DESC_BIT_RX_CRCE) || + (desc_data.flags & MTK_STAR_DESC_BIT_RX_OSIZE)) { + /* Error packet -> drop and reuse skb. */ + new_skb = curr_skb; + goto push_new_skb; + } + + /* Prepare new skb before receiving the current one. + * Reuse the current skb if we fail at any point. + */ + new_skb = mtk_star_alloc_skb(ndev); + if (!new_skb) { + ndev->stats.rx_dropped++; + new_skb = curr_skb; + goto push_new_skb; + } + + new_dma_addr = mtk_star_dma_map_rx(priv, new_skb); + if (dma_mapping_error(dev, new_dma_addr)) { + ndev->stats.rx_dropped++; + dev_kfree_skb(new_skb); + new_skb = curr_skb; + netdev_err(ndev, "DMA mapping error of RX descriptor\n"); + goto push_new_skb; + } + + /* We can't fail anymore at this point: + * it's safe to unmap the skb. + */ + mtk_star_dma_unmap_rx(priv, &desc_data); + + skb_put(desc_data.skb, desc_data.len); + desc_data.skb->ip_summed = CHECKSUM_NONE; + desc_data.skb->protocol = eth_type_trans(desc_data.skb, ndev); + desc_data.skb->dev = ndev; + netif_receive_skb(desc_data.skb); + + /* update dma_addr for new skb */ + desc_data.dma_addr = new_dma_addr; + +push_new_skb: + + count++; + + desc_data.len = skb_tailroom(new_skb); + desc_data.skb = new_skb; + mtk_star_ring_push_head_rx(ring, &desc_data); + } + + mtk_star_dma_resume_rx(priv); + + return count; +} + +static int mtk_star_rx_poll(struct napi_struct *napi, int budget) +{ + struct mtk_star_priv *priv; + int work_done = 0; + + priv = container_of(napi, struct mtk_star_priv, rx_napi); + + work_done = mtk_star_rx(priv, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + spin_lock(&priv->lock); + mtk_star_enable_dma_irq(priv, true, false); + spin_unlock(&priv->lock); + } + + return work_done; +} + +static void mtk_star_mdio_rwok_clear(struct mtk_star_priv *priv) +{ + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, + MTK_STAR_BIT_PHY_CTRL0_RWOK); +} + +static int mtk_star_mdio_rwok_wait(struct mtk_star_priv *priv) +{ + unsigned int val; + + return regmap_read_poll_timeout(priv->regs, MTK_STAR_REG_PHY_CTRL0, + val, val & MTK_STAR_BIT_PHY_CTRL0_RWOK, + 10, MTK_STAR_WAIT_TIMEOUT); +} + +static int mtk_star_mdio_read(struct mii_bus *mii, int phy_id, int regnum) +{ + struct mtk_star_priv *priv = mii->priv; + unsigned int val, data; + int ret; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + mtk_star_mdio_rwok_clear(priv); + + val = (regnum << MTK_STAR_OFF_PHY_CTRL0_PREG); + val &= MTK_STAR_MSK_PHY_CTRL0_PREG; + val |= MTK_STAR_BIT_PHY_CTRL0_RDCMD; + + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val); + + ret = mtk_star_mdio_rwok_wait(priv); + if (ret) + return ret; + + regmap_read(priv->regs, MTK_STAR_REG_PHY_CTRL0, &data); + + data &= MTK_STAR_MSK_PHY_CTRL0_RWDATA; + data >>= MTK_STAR_OFF_PHY_CTRL0_RWDATA; + + return data; +} + +static int 
mtk_star_mdio_write(struct mii_bus *mii, int phy_id, + int regnum, u16 data) +{ + struct mtk_star_priv *priv = mii->priv; + unsigned int val; + + if (regnum & MII_ADDR_C45) + return -EOPNOTSUPP; + + mtk_star_mdio_rwok_clear(priv); + + val = data; + val <<= MTK_STAR_OFF_PHY_CTRL0_RWDATA; + val &= MTK_STAR_MSK_PHY_CTRL0_RWDATA; + regnum <<= MTK_STAR_OFF_PHY_CTRL0_PREG; + regnum &= MTK_STAR_MSK_PHY_CTRL0_PREG; + val |= regnum; + val |= MTK_STAR_BIT_PHY_CTRL0_WTCMD; + + regmap_write(priv->regs, MTK_STAR_REG_PHY_CTRL0, val); + + return mtk_star_mdio_rwok_wait(priv); +} + +static int mtk_star_mdio_init(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct device *dev = mtk_star_get_dev(priv); + struct device_node *of_node, *mdio_node; + int ret; + + of_node = dev->of_node; + + mdio_node = of_get_child_by_name(of_node, "mdio"); + if (!mdio_node) + return -ENODEV; + + if (!of_device_is_available(mdio_node)) { + ret = -ENODEV; + goto out_put_node; + } + + priv->mii = devm_mdiobus_alloc(dev); + if (!priv->mii) { + ret = -ENOMEM; + goto out_put_node; + } + + snprintf(priv->mii->id, MII_BUS_ID_SIZE, "%s", dev_name(dev)); + priv->mii->name = "mtk-mac-mdio"; + priv->mii->parent = dev; + priv->mii->read = mtk_star_mdio_read; + priv->mii->write = mtk_star_mdio_write; + priv->mii->priv = priv; + + ret = devm_of_mdiobus_register(dev, priv->mii, mdio_node); + +out_put_node: + of_node_put(mdio_node); + return ret; +} + +static __maybe_unused int mtk_star_suspend(struct device *dev) +{ + struct mtk_star_priv *priv; + struct net_device *ndev; + + ndev = dev_get_drvdata(dev); + priv = netdev_priv(ndev); + + if (netif_running(ndev)) + mtk_star_disable(ndev); + + clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); + + return 0; +} + +static __maybe_unused int mtk_star_resume(struct device *dev) +{ + struct mtk_star_priv *priv; + struct net_device *ndev; + int ret; + + ndev = dev_get_drvdata(dev); + priv = netdev_priv(ndev); + + ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks); + if (ret) + return ret; + + if (netif_running(ndev)) { + ret = mtk_star_enable(ndev); + if (ret) + clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); + } + + return ret; +} + +static void mtk_star_clk_disable_unprepare(void *data) +{ + struct mtk_star_priv *priv = data; + + clk_bulk_disable_unprepare(MTK_STAR_NCLKS, priv->clks); +} + +static int mtk_star_set_timing(struct mtk_star_priv *priv) +{ + struct device *dev = mtk_star_get_dev(priv); + unsigned int delay_val = 0; + + switch (priv->phy_intf) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_RMII: + delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_RX_CLK, priv->rx_inv); + delay_val |= FIELD_PREP(MTK_STAR_BIT_INV_TX_CLK, priv->tx_inv); + break; + default: + dev_err(dev, "This interface not supported\n"); + return -EINVAL; + } + + return regmap_write(priv->regs, MTK_STAR_REG_TEST0, delay_val); +} + +static int mtk_star_probe(struct platform_device *pdev) +{ + struct device_node *of_node; + struct mtk_star_priv *priv; + struct net_device *ndev; + struct device *dev; + void __iomem *base; + int ret, i; + + dev = &pdev->dev; + of_node = dev->of_node; + + ndev = devm_alloc_etherdev(dev, sizeof(*priv)); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->ndev = ndev; + priv->compat_data = of_device_get_match_data(&pdev->dev); + SET_NETDEV_DEV(ndev, dev); + platform_set_drvdata(pdev, ndev); + + ndev->min_mtu = ETH_ZLEN; + ndev->max_mtu = MTK_STAR_MAX_FRAME_SIZE; + + spin_lock_init(&priv->lock); + + base = 
devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + /* We won't be checking the return values of regmap read & write + * functions. They can only fail for mmio if there's a clock attached + * to regmap which is not the case here. + */ + priv->regs = devm_regmap_init_mmio(dev, base, + &mtk_star_regmap_config); + if (IS_ERR(priv->regs)) + return PTR_ERR(priv->regs); + + priv->pericfg = syscon_regmap_lookup_by_phandle(of_node, + "mediatek,pericfg"); + if (IS_ERR(priv->pericfg)) { + dev_err(dev, "Failed to lookup the PERICFG syscon\n"); + return PTR_ERR(priv->pericfg); + } + + ndev->irq = platform_get_irq(pdev, 0); + if (ndev->irq < 0) + return ndev->irq; + + for (i = 0; i < MTK_STAR_NCLKS; i++) + priv->clks[i].id = mtk_star_clk_names[i]; + ret = devm_clk_bulk_get(dev, MTK_STAR_NCLKS, priv->clks); + if (ret) + return ret; + + ret = clk_bulk_prepare_enable(MTK_STAR_NCLKS, priv->clks); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, + mtk_star_clk_disable_unprepare, priv); + if (ret) + return ret; + + ret = of_get_phy_mode(of_node, &priv->phy_intf); + if (ret) { + return ret; + } else if (priv->phy_intf != PHY_INTERFACE_MODE_RMII && + priv->phy_intf != PHY_INTERFACE_MODE_MII) { + dev_err(dev, "unsupported phy mode: %s\n", + phy_modes(priv->phy_intf)); + return -EINVAL; + } + + priv->phy_node = of_parse_phandle(of_node, "phy-handle", 0); + if (!priv->phy_node) { + dev_err(dev, "failed to retrieve the phy handle from device tree\n"); + return -ENODEV; + } + + priv->rmii_rxc = of_property_read_bool(of_node, "mediatek,rmii-rxc"); + priv->rx_inv = of_property_read_bool(of_node, "mediatek,rxc-inverse"); + priv->tx_inv = of_property_read_bool(of_node, "mediatek,txc-inverse"); + + if (priv->compat_data->set_interface_mode) { + ret = priv->compat_data->set_interface_mode(ndev); + if (ret) { + dev_err(dev, "Failed to set phy interface, err = %d\n", ret); + return -EINVAL; + } + } + + ret = mtk_star_set_timing(priv); + if (ret) { + dev_err(dev, "Failed to set timing, err = %d\n", ret); + return -EINVAL; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + dev_err(dev, "unsupported DMA mask\n"); + return ret; + } + + priv->ring_base = dmam_alloc_coherent(dev, MTK_STAR_DMA_SIZE, + &priv->dma_addr, + GFP_KERNEL | GFP_DMA); + if (!priv->ring_base) + return -ENOMEM; + + mtk_star_nic_disable_pd(priv); + mtk_star_init_config(priv); + + ret = mtk_star_mdio_init(ndev); + if (ret) + return ret; + + ret = platform_get_ethdev_address(dev, ndev); + if (ret || !is_valid_ether_addr(ndev->dev_addr)) + eth_hw_addr_random(ndev); + + ndev->netdev_ops = &mtk_star_netdev_ops; + ndev->ethtool_ops = &mtk_star_ethtool_ops; + + netif_napi_add(ndev, &priv->rx_napi, mtk_star_rx_poll); + netif_napi_add_tx(ndev, &priv->tx_napi, mtk_star_tx_poll); + + return devm_register_netdev(dev, ndev); +} + +#ifdef CONFIG_OF +static int mt8516_set_interface_mode(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct device *dev = mtk_star_get_dev(priv); + unsigned int intf_val, ret, rmii_rxc; + + switch (priv->phy_intf) { + case PHY_INTERFACE_MODE_MII: + intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII; + rmii_rxc = 0; + break; + case PHY_INTERFACE_MODE_RMII: + intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII; + rmii_rxc = priv->rmii_rxc ? 
0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK; + break; + default: + dev_err(dev, "This interface not supported\n"); + return -EINVAL; + } + + ret = regmap_update_bits(priv->pericfg, + MTK_PERICFG_REG_NIC_CFG1_CON, + MTK_PERICFG_BIT_NIC_CFG_CON_CLK, + rmii_rxc); + if (ret) + return ret; + + return regmap_update_bits(priv->pericfg, + MTK_PERICFG_REG_NIC_CFG0_CON, + MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF, + intf_val); +} + +static int mt8365_set_interface_mode(struct net_device *ndev) +{ + struct mtk_star_priv *priv = netdev_priv(ndev); + struct device *dev = mtk_star_get_dev(priv); + unsigned int intf_val; + + switch (priv->phy_intf) { + case PHY_INTERFACE_MODE_MII: + intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_MII; + break; + case PHY_INTERFACE_MODE_RMII: + intf_val = MTK_PERICFG_BIT_NIC_CFG_CON_RMII; + intf_val |= priv->rmii_rxc ? 0 : MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2; + break; + default: + dev_err(dev, "This interface not supported\n"); + return -EINVAL; + } + + return regmap_update_bits(priv->pericfg, + MTK_PERICFG_REG_NIC_CFG_CON_V2, + MTK_PERICFG_REG_NIC_CFG_CON_CFG_INTF | + MTK_PERICFG_BIT_NIC_CFG_CON_CLK_V2, + intf_val); +} + +static const struct mtk_star_compat mtk_star_mt8516_compat = { + .set_interface_mode = mt8516_set_interface_mode, + .bit_clk_div = MTK_STAR_BIT_CLK_DIV_10, +}; + +static const struct mtk_star_compat mtk_star_mt8365_compat = { + .set_interface_mode = mt8365_set_interface_mode, + .bit_clk_div = MTK_STAR_BIT_CLK_DIV_50, +}; + +static const struct of_device_id mtk_star_of_match[] = { + { .compatible = "mediatek,mt8516-eth", + .data = &mtk_star_mt8516_compat }, + { .compatible = "mediatek,mt8518-eth", + .data = &mtk_star_mt8516_compat }, + { .compatible = "mediatek,mt8175-eth", + .data = &mtk_star_mt8516_compat }, + { .compatible = "mediatek,mt8365-eth", + .data = &mtk_star_mt8365_compat }, + { } +}; +MODULE_DEVICE_TABLE(of, mtk_star_of_match); +#endif + +static SIMPLE_DEV_PM_OPS(mtk_star_pm_ops, + mtk_star_suspend, mtk_star_resume); + +static struct platform_driver mtk_star_driver = { + .driver = { + .name = MTK_STAR_DRVNAME, + .pm = &mtk_star_pm_ops, + .of_match_table = of_match_ptr(mtk_star_of_match), + }, + .probe = mtk_star_probe, +}; +module_platform_driver(mtk_star_driver); + +MODULE_AUTHOR("Bartosz Golaszewski <bgolaszewski@baylibre.com>"); +MODULE_DESCRIPTION("Mediatek STAR Ethernet MAC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c new file mode 100644 index 000000000..65e01bf4b --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -0,0 +1,1160 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/bitfield.h> +#include <linux/dma-mapping.h> +#include <linux/skbuff.h> +#include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/mfd/syscon.h> +#include <linux/debugfs.h> +#include <linux/soc/mediatek/mtk_wed.h> +#include "mtk_eth_soc.h" +#include "mtk_wed_regs.h" +#include "mtk_wed.h" +#include "mtk_ppe.h" + +#define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) + +#define MTK_WED_PKT_SIZE 1900 +#define MTK_WED_BUF_SIZE 2048 +#define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) + +#define MTK_WED_TX_RING_SIZE 2048 +#define MTK_WED_WDMA_RING_SIZE 1024 +#define MTK_WED_MAX_GROUP_SIZE 0x100 +#define MTK_WED_VLD_GROUP_SIZE 0x40 +#define MTK_WED_PER_GROUP_PKT 128 + +#define MTK_WED_FBUF_SIZE 128 + +static struct mtk_wed_hw *hw_list[2]; 
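+/* Up to two WED instances are supported; hw_list is indexed by the id
+ * passed to mtk_wed_add_hw(), and hw_lock below serializes attach/detach
+ * and flow accounting against it.
+ */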
+static DEFINE_MUTEX(hw_lock); + +static void +wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) +{ + regmap_update_bits(dev->hw->regs, reg, mask | val, val); +} + +static void +wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return wed_m32(dev, reg, 0, mask); +} + +static void +wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + return wed_m32(dev, reg, mask, 0); +} + +static void +wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) +{ + wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); +} + +static void +wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) +{ + wdma_m32(dev, reg, 0, mask); +} + +static u32 +mtk_wed_read_reset(struct mtk_wed_device *dev) +{ + return wed_r32(dev, MTK_WED_RESET); +} + +static void +mtk_wed_reset(struct mtk_wed_device *dev, u32 mask) +{ + u32 status; + + wed_w32(dev, MTK_WED_RESET, mask); + if (readx_poll_timeout(mtk_wed_read_reset, dev, status, + !(status & mask), 0, 1000)) + WARN_ON_ONCE(1); +} + +static struct mtk_wed_hw * +mtk_wed_assign(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw; + int i; + + if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { + hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)]; + if (!hw) + return NULL; + + if (!hw->wed_dev) + goto out; + + if (hw->version == 1) + return NULL; + + /* MT7986 WED devices do not have any pcie slot restrictions */ + } + /* MT7986 PCIE or AXI */ + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + hw = hw_list[i]; + if (hw && !hw->wed_dev) + goto out; + } + + return NULL; + +out: + hw->wed_dev = dev; + return hw; +} + +static int +mtk_wed_buffer_alloc(struct mtk_wed_device *dev) +{ + struct mtk_wdma_desc *desc; + dma_addr_t desc_phys; + void **page_list; + int token = dev->wlan.token_start; + int ring_size; + int n_pages; + int i, page_idx; + + ring_size = dev->wlan.nbuf & ~(MTK_WED_BUF_PER_PAGE - 1); + n_pages = ring_size / MTK_WED_BUF_PER_PAGE; + + page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL); + if (!page_list) + return -ENOMEM; + + dev->buf_ring.size = ring_size; + dev->buf_ring.pages = page_list; + + desc = dma_alloc_coherent(dev->hw->dev, ring_size * sizeof(*desc), + &desc_phys, GFP_KERNEL); + if (!desc) + return -ENOMEM; + + dev->buf_ring.desc = desc; + dev->buf_ring.desc_phys = desc_phys; + + for (i = 0, page_idx = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) { + dma_addr_t page_phys, buf_phys; + struct page *page; + void *buf; + int s; + + page = __dev_alloc_pages(GFP_KERNEL, 0); + if (!page) + return -ENOMEM; + + page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE, + DMA_BIDIRECTIONAL); + if (dma_mapping_error(dev->hw->dev, page_phys)) { + __free_page(page); + return -ENOMEM; + } + + page_list[page_idx++] = page; + dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + + buf = page_to_virt(page); + buf_phys = page_phys; + + for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) { + u32 txd_size; + u32 ctrl; + + txd_size = dev->wlan.init_buf(buf, buf_phys, token++); + + desc->buf0 = cpu_to_le32(buf_phys); + desc->buf1 = cpu_to_le32(buf_phys + txd_size); + + if (dev->hw->version == 1) + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1, + MTK_WED_BUF_SIZE - txd_size) | + MTK_WDMA_DESC_CTRL_LAST_SEG1; + else + ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0, txd_size) | + FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2, + MTK_WED_BUF_SIZE - txd_size) | + MTK_WDMA_DESC_CTRL_LAST_SEG0; + desc->ctrl = cpu_to_le32(ctrl); + desc->info = 0; + desc++; + + buf += MTK_WED_BUF_SIZE; + 
buf_phys += MTK_WED_BUF_SIZE; + } + + dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE, + DMA_BIDIRECTIONAL); + } + + return 0; +} + +static void +mtk_wed_free_buffer(struct mtk_wed_device *dev) +{ + struct mtk_wdma_desc *desc = dev->buf_ring.desc; + void **page_list = dev->buf_ring.pages; + int page_idx; + int i; + + if (!page_list) + return; + + if (!desc) + goto free_pagelist; + + for (i = 0, page_idx = 0; i < dev->buf_ring.size; i += MTK_WED_BUF_PER_PAGE) { + void *page = page_list[page_idx++]; + dma_addr_t buf_addr; + + if (!page) + break; + + buf_addr = le32_to_cpu(desc[i].buf0); + dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE, + DMA_BIDIRECTIONAL); + __free_page(page); + } + + dma_free_coherent(dev->hw->dev, dev->buf_ring.size * sizeof(*desc), + desc, dev->buf_ring.desc_phys); + +free_pagelist: + kfree(page_list); +} + +static void +mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring) +{ + if (!ring->desc) + return; + + dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size, + ring->desc, ring->desc_phys); +} + +static void +mtk_wed_free_tx_rings(struct mtk_wed_device *dev) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) + mtk_wed_free_ring(dev, &dev->tx_ring[i]); + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + mtk_wed_free_ring(dev, &dev->tx_wdma[i]); +} + +static void +mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en) +{ + u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; + + if (dev->hw->version == 1) + mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; + else + mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | + MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | + MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + + if (!dev->hw->num_flows) + mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + + wed_w32(dev, MTK_WED_EXT_INT_MASK, en ? 
mask : 0); + wed_r32(dev, MTK_WED_EXT_INT_MASK); +} + +static void +mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable) +{ + if (enable) { + wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); + wed_w32(dev, MTK_WED_TXP_DW1, + FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103)); + } else { + wed_w32(dev, MTK_WED_TXP_DW1, + FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100)); + wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR); + } +} + +static void +mtk_wed_dma_disable(struct mtk_wed_device *dev) +{ + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + + wed_clr(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + + wdma_m32(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_TX_DMA_EN | + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES, 0); + + if (dev->hw->version == 1) { + regmap_write(dev->hw->mirror, dev->hw->index * 4, 0); + wdma_m32(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES, 0); + } else { + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); + + mtk_wed_set_512_support(dev, false); + } +} + +static void +mtk_wed_stop(struct mtk_wed_device *dev) +{ + mtk_wed_dma_disable(dev); + mtk_wed_set_ext_int(dev, false); + + wed_clr(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0); + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0); + wdma_w32(dev, MTK_WDMA_INT_MASK, 0); + wdma_w32(dev, MTK_WDMA_INT_GRP2, 0); + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, 0); +} + +static void +mtk_wed_detach(struct mtk_wed_device *dev) +{ + struct mtk_wed_hw *hw = dev->hw; + + mutex_lock(&hw_lock); + + mtk_wed_stop(dev); + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + mtk_wed_reset(dev, MTK_WED_RESET_WED); + + mtk_wed_free_buffer(dev); + mtk_wed_free_tx_rings(dev); + + if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) { + struct device_node *wlan_node; + + wlan_node = dev->wlan.pci_dev->dev.of_node; + if (of_dma_is_coherent(wlan_node) && hw->hifsys) + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, + BIT(hw->index), BIT(hw->index)); + } + + if (!hw_list[!hw->index]->wed_dev && + hw->eth->dma_dev != hw->eth->dev) + mtk_eth_set_dma_device(hw->eth, hw->eth->dev); + + memset(dev, 0, sizeof(*dev)); + module_put(THIS_MODULE); + + hw->wed_dev = NULL; + mutex_unlock(&hw_lock); +} + +#define PCIE_BASE_ADDR0 0x11280000 +static void +mtk_wed_bus_init(struct mtk_wed_device *dev) +{ + switch (dev->wlan.bus_type) { + case MTK_WED_BUS_PCIE: { + struct device_node *np = dev->hw->eth->dev->of_node; + struct regmap *regs; + + regs = syscon_regmap_lookup_by_phandle(np, + "mediatek,wed-pcie"); + if (IS_ERR(regs)) + break; + + regmap_update_bits(regs, 0, BIT(0), BIT(0)); + + wed_w32(dev, MTK_WED_PCIE_INT_CTRL, + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2)); + + /* pcie interrupt control: pola/source selection */ + wed_set(dev, MTK_WED_PCIE_INT_CTRL, + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA | + FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL, 1)); + wed_r32(dev, MTK_WED_PCIE_INT_CTRL); + + wed_w32(dev, MTK_WED_PCIE_CFG_INTM, PCIE_BASE_ADDR0 | 0x180); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, PCIE_BASE_ADDR0 | 0x184); + + /* pcie interrupt status trigger register */ + wed_w32(dev, 
MTK_WED_PCIE_INT_TRIGGER, BIT(24)); + wed_r32(dev, MTK_WED_PCIE_INT_TRIGGER); + + /* pola setting */ + wed_set(dev, MTK_WED_PCIE_INT_CTRL, + MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA); + break; + } + case MTK_WED_BUS_AXI: + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, + MTK_WED_WPDMA_INT_CTRL_SIG_SRC | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0)); + break; + default: + break; + } +} + +static void +mtk_wed_set_wpdma(struct mtk_wed_device *dev) +{ + if (dev->hw->version == 1) { + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys); + } else { + mtk_wed_bus_init(dev); + + wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int); + wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx); + wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree); + } +} + +static void +mtk_wed_hw_init_early(struct mtk_wed_device *dev) +{ + u32 mask, set; + + mtk_wed_stop(dev); + mtk_wed_reset(dev, MTK_WED_RESET_WED); + mtk_wed_set_wpdma(dev); + + mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE | + MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE | + MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE; + set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2) | + MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP | + MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY; + wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set); + + if (dev->hw->version == 1) { + u32 offset = dev->hw->index ? 0x04000400 : 0; + + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); + + wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset); + wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset); + wed_w32(dev, MTK_WED_PCIE_CFG_BASE, + MTK_PCIE_BASE(dev->hw->index)); + } else { + wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy); + wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT); + wed_w32(dev, MTK_WED_WDMA_OFFSET0, + FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS, + MTK_WDMA_INT_STATUS) | + FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG, + MTK_WDMA_GLO_CFG)); + + wed_w32(dev, MTK_WED_WDMA_OFFSET1, + FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL, + MTK_WDMA_RING_TX(0)) | + FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL, + MTK_WDMA_RING_RX(0))); + } +} + +static void +mtk_wed_hw_init(struct mtk_wed_device *dev) +{ + if (dev->init_done) + return; + + dev->init_done = true; + mtk_wed_set_ext_int(dev, false); + wed_w32(dev, MTK_WED_TX_BM_CTRL, + MTK_WED_TX_BM_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM, + dev->buf_ring.size / 128) | + FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM, + MTK_WED_TX_RING_SIZE / 256)); + + wed_w32(dev, MTK_WED_TX_BM_BASE, dev->buf_ring.desc_phys); + + wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE); + + if (dev->hw->version == 1) { + wed_w32(dev, MTK_WED_TX_BM_TKID, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, + dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + + dev->wlan.nbuf - 1)); + wed_w32(dev, MTK_WED_TX_BM_DYN_THR, + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) | + MTK_WED_TX_BM_DYN_THR_HI); + } else { + wed_w32(dev, MTK_WED_TX_BM_TKID_V2, + FIELD_PREP(MTK_WED_TX_BM_TKID_START, + dev->wlan.token_start) | + FIELD_PREP(MTK_WED_TX_BM_TKID_END, + dev->wlan.token_start + + dev->wlan.nbuf - 1)); + wed_w32(dev, MTK_WED_TX_BM_DYN_THR, + FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) | + MTK_WED_TX_BM_DYN_THR_HI_V2); + wed_w32(dev, MTK_WED_TX_TKID_CTRL, + MTK_WED_TX_TKID_CTRL_PAUSE | + FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM, + dev->buf_ring.size / 128) | + 
FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM, + dev->buf_ring.size / 128)); + wed_w32(dev, MTK_WED_TX_TKID_DYN_THR, + FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) | + MTK_WED_TX_TKID_DYN_THR_HI); + } + + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + + if (dev->hw->version == 1) + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + else + wed_clr(dev, MTK_WED_TX_TKID_CTRL, MTK_WED_TX_TKID_CTRL_PAUSE); + + wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE); +} + +static void +mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size) +{ + void *head = (void *)ring->desc; + int i; + + for (i = 0; i < size; i++) { + struct mtk_wdma_desc *desc; + + desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size); + desc->buf0 = 0; + desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); + desc->buf1 = 0; + desc->info = 0; + } +} + +static u32 +mtk_wed_check_busy(struct mtk_wed_device *dev) +{ + if (wed_r32(dev, MTK_WED_GLO_CFG) & MTK_WED_GLO_CFG_TX_DMA_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_WPDMA_GLO_CFG) & + MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_CTRL) & MTK_WED_CTRL_WDMA_INT_AGENT_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_WDMA_GLO_CFG) & + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) + return true; + + if (wdma_r32(dev, MTK_WDMA_GLO_CFG) & + MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY) + return true; + + if (wed_r32(dev, MTK_WED_CTRL) & + (MTK_WED_CTRL_WED_TX_BM_BUSY | MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY)) + return true; + + return false; +} + +static int +mtk_wed_poll_busy(struct mtk_wed_device *dev) +{ + int sleep = 15000; + int timeout = 100 * sleep; + u32 val; + + return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, + timeout, false, dev); +} + +static void +mtk_wed_reset_dma(struct mtk_wed_device *dev) +{ + bool busy = false; + u32 val; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) { + if (!dev->tx_ring[i].desc) + continue; + + mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE); + } + + if (mtk_wed_poll_busy(dev)) + busy = mtk_wed_check_busy(dev); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); + } else { + wed_w32(dev, MTK_WED_RESET_IDX, + MTK_WED_RESET_IDX_TX | + MTK_WED_RESET_IDX_RX); + wed_w32(dev, MTK_WED_RESET_IDX, 0); + } + + wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); + wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); + } else { + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, + MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); + wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); + + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + + wed_clr(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); + } + + for (i = 0; i < 100; i++) { + val = wed_r32(dev, MTK_WED_TX_BM_INTF); + if (FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, val) == 0x40) + break; + } + + mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); + + if (busy) { + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); + mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); + } else { + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, + MTK_WED_WPDMA_RESET_IDX_TX | + MTK_WED_WPDMA_RESET_IDX_RX); + wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); + } + +} + +static int +mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, + int size, u32 desc_size) +{ + ring->desc = 
dma_alloc_coherent(dev->hw->dev, size * desc_size, + &ring->desc_phys, GFP_KERNEL); + if (!ring->desc) + return -ENOMEM; + + ring->desc_size = desc_size; + ring->size = size; + mtk_wed_ring_reset(ring, size); + + return 0; +} + +static int +mtk_wed_wdma_ring_setup(struct mtk_wed_device *dev, int idx, int size) +{ + u32 desc_size = sizeof(struct mtk_wdma_desc) * dev->hw->version; + struct mtk_wed_ring *wdma = &dev->tx_wdma[idx]; + + if (mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, desc_size)) + return -ENOMEM; + + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, + size); + wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, + wdma->desc_phys); + wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, + size); + + return 0; +} + +static void +mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) +{ + u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); + + /* wed control cr set */ + wed_set(dev, MTK_WED_CTRL, + MTK_WED_CTRL_WDMA_INT_AGENT_EN | + MTK_WED_CTRL_WPDMA_INT_AGENT_EN | + MTK_WED_CTRL_WED_TX_BM_EN | + MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); + + if (dev->hw->version == 1) { + wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, + MTK_WED_PCIE_INT_TRIGGER_STATUS); + + wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, + MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | + MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); + + wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); + } else { + /* initail tx interrupt trigger */ + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | + MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | + MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | + MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, + dev->wlan.tx_tbit[0]) | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, + dev->wlan.tx_tbit[1])); + + /* initail txfree interrupt trigger */ + wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, + MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | + MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | + FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, + dev->wlan.txfree_tbit)); + + wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); + wed_set(dev, MTK_WED_WDMA_INT_CTRL, + FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, + dev->wdma_idx)); + } + + wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); + + wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); + wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); + wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); + wed_w32(dev, MTK_WED_INT_MASK, irq_mask); +} + +static void +mtk_wed_dma_enable(struct mtk_wed_device *dev) +{ + wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); + + wed_set(dev, MTK_WED_GLO_CFG, + MTK_WED_GLO_CFG_TX_DMA_EN | + MTK_WED_GLO_CFG_RX_DMA_EN); + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); + wed_set(dev, MTK_WED_WDMA_GLO_CFG, + MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); + + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_TX_DMA_EN | + MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | + MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); + + if (dev->hw->version == 1) { + wdma_set(dev, MTK_WDMA_GLO_CFG, + MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); + } else { + wed_set(dev, MTK_WED_WPDMA_CTRL, + MTK_WED_WPDMA_CTRL_SDL1_FIXED); + + wed_set(dev, MTK_WED_WPDMA_GLO_CFG, + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | + MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); + + wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, + 
MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | + MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); + } +} + +static void +mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++) + if (!dev->tx_wdma[i].desc) + mtk_wed_wdma_ring_setup(dev, i, 16); + + mtk_wed_hw_init(dev); + mtk_wed_configure_irq(dev, irq_mask); + + mtk_wed_set_ext_int(dev, true); + + if (dev->hw->version == 1) { + u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | + FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, + dev->hw->index); + + val |= BIT(0) | (BIT(1) * !!dev->hw->index); + regmap_write(dev->hw->mirror, dev->hw->index * 4, val); + } else { + mtk_wed_set_512_support(dev, true); + } + + mtk_wed_dma_enable(dev); + dev->running = true; +} + +static int +mtk_wed_attach(struct mtk_wed_device *dev) + __releases(RCU) +{ + struct mtk_wed_hw *hw; + struct device *device; + int ret = 0; + + RCU_LOCKDEP_WARN(!rcu_read_lock_held(), + "mtk_wed_attach without holding the RCU read lock"); + + if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && + pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || + !try_module_get(THIS_MODULE)) + ret = -ENODEV; + + rcu_read_unlock(); + + if (ret) + return ret; + + mutex_lock(&hw_lock); + + hw = mtk_wed_assign(dev); + if (!hw) { + module_put(THIS_MODULE); + ret = -ENODEV; + goto out; + } + + device = dev->wlan.bus_type == MTK_WED_BUS_PCIE + ? &dev->wlan.pci_dev->dev + : &dev->wlan.platform_dev->dev; + dev_info(device, "attaching wed device %d version %d\n", + hw->index, hw->version); + + dev->hw = hw; + dev->dev = hw->dev; + dev->irq = hw->irq; + dev->wdma_idx = hw->index; + + if (hw->eth->dma_dev == hw->eth->dev && + of_dma_is_coherent(hw->eth->dev->of_node)) + mtk_eth_set_dma_device(hw->eth, hw->dev); + + ret = mtk_wed_buffer_alloc(dev); + if (ret) { + mtk_wed_detach(dev); + goto out; + } + + mtk_wed_hw_init_early(dev); + if (hw->hifsys) + regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, + BIT(hw->index), 0); + +out: + mutex_unlock(&hw_lock); + + return ret; +} + +static int +mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->tx_ring[idx]; + + /* + * Tx ring redirection: + * Instead of configuring the WLAN PDMA TX ring directly, the WLAN + * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) + * registers. + * + * WED driver posts its own DMA ring as WLAN PDMA TX and configures it + * into MTK_WED_WPDMA_RING_TX(n) registers. + * It gets filled with packets picked up from WED TX ring and from + * WDMA RX. 
+ */ + + BUG_ON(idx >= ARRAY_SIZE(dev->tx_ring)); + + if (mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, + sizeof(*ring->desc))) + return -ENOMEM; + + if (mtk_wed_wdma_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE)) + return -ENOMEM; + + ring->reg_base = MTK_WED_RING_TX(idx); + ring->wpdma = regs; + + /* WED -> WPDMA */ + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); + wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); + + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, + ring->desc_phys); + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, + MTK_WED_TX_RING_SIZE); + wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); + + return 0; +} + +static int +mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) +{ + struct mtk_wed_ring *ring = &dev->txfree_ring; + int i, index = dev->hw->version == 1; + + /* + * For txfree event handling, the same DMA ring is shared between WED + * and WLAN. The WLAN driver accesses the ring index registers through + * WED + */ + ring->reg_base = MTK_WED_RING_RX(index); + ring->wpdma = regs; + + for (i = 0; i < 12; i += 4) { + u32 val = readl(regs + i); + + wed_w32(dev, MTK_WED_RING_RX(index) + i, val); + wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); + } + + return 0; +} + +static u32 +mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) +{ + u32 val, ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; + + if (dev->hw->version == 1) + ext_mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR; + else + ext_mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH | + MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH | + MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | + MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR; + + val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); + wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); + val &= ext_mask; + if (!dev->hw->num_flows) + val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; + if (val && net_ratelimit()) + pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); + + val = wed_r32(dev, MTK_WED_INT_STATUS); + val &= mask; + wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ + + return val; +} + +static void +mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) +{ + if (!dev->running) + return; + + mtk_wed_set_ext_int(dev, !!mask); + wed_w32(dev, MTK_WED_INT_MASK, mask); +} + +int mtk_wed_flow_add(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + int ret; + + if (!hw || !hw->wed_dev) + return -ENODEV; + + if (hw->num_flows) { + hw->num_flows++; + return 0; + } + + mutex_lock(&hw_lock); + if (!hw->wed_dev) { + ret = -ENODEV; + goto out; + } + + ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev); + if (!ret) + hw->num_flows++; + mtk_wed_set_ext_int(hw->wed_dev, true); + +out: + mutex_unlock(&hw_lock); + + return ret; +} + +void mtk_wed_flow_remove(int index) +{ + struct mtk_wed_hw *hw = hw_list[index]; + + if (!hw) + return; + + if (--hw->num_flows) + return; + + mutex_lock(&hw_lock); + if (!hw->wed_dev) + goto out; + + hw->wed_dev->wlan.offload_disable(hw->wed_dev); + mtk_wed_set_ext_int(hw->wed_dev, true); + +out: + mutex_unlock(&hw_lock); +} + +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, phys_addr_t wdma_phy, + int index) +{ + static const struct mtk_wed_ops wed_ops = { + .attach = mtk_wed_attach, + .tx_ring_setup = mtk_wed_tx_ring_setup, + .txfree_ring_setup = mtk_wed_txfree_ring_setup, + .start = mtk_wed_start, + .stop = mtk_wed_stop, + .reset_dma = 
mtk_wed_reset_dma, + .reg_read = wed_r32, + .reg_write = wed_w32, + .irq_get = mtk_wed_irq_get, + .irq_set_mask = mtk_wed_irq_set_mask, + .detach = mtk_wed_detach, + }; + struct device_node *eth_np = eth->dev->of_node; + struct platform_device *pdev; + struct mtk_wed_hw *hw; + struct regmap *regs; + int irq; + + if (!np) + return; + + pdev = of_find_device_by_node(np); + if (!pdev) + goto err_of_node_put; + + get_device(&pdev->dev); + irq = platform_get_irq(pdev, 0); + if (irq < 0) + goto err_put_device; + + regs = syscon_regmap_lookup_by_phandle(np, NULL); + if (IS_ERR(regs)) + goto err_put_device; + + rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); + + mutex_lock(&hw_lock); + + if (WARN_ON(hw_list[index])) + goto unlock; + + hw = kzalloc(sizeof(*hw), GFP_KERNEL); + if (!hw) + goto unlock; + + hw->node = np; + hw->regs = regs; + hw->eth = eth; + hw->dev = &pdev->dev; + hw->wdma_phy = wdma_phy; + hw->wdma = wdma; + hw->index = index; + hw->irq = irq; + hw->version = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1; + + if (hw->version == 1) { + hw->mirror = syscon_regmap_lookup_by_phandle(eth_np, + "mediatek,pcie-mirror"); + hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np, + "mediatek,hifsys"); + if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) { + kfree(hw); + goto unlock; + } + + if (!index) { + regmap_write(hw->mirror, 0, 0); + regmap_write(hw->mirror, 4, 0); + } + } + + mtk_wed_hw_add_debugfs(hw); + + hw_list[index] = hw; + + mutex_unlock(&hw_lock); + + return; + +unlock: + mutex_unlock(&hw_lock); +err_put_device: + put_device(&pdev->dev); +err_of_node_put: + of_node_put(np); +} + +void mtk_wed_exit(void) +{ + int i; + + rcu_assign_pointer(mtk_soc_wed_ops, NULL); + + synchronize_rcu(); + + for (i = 0; i < ARRAY_SIZE(hw_list); i++) { + struct mtk_wed_hw *hw; + + hw = hw_list[i]; + if (!hw) + continue; + + hw_list[i] = NULL; + debugfs_remove(hw->debugfs_dir); + put_device(hw->dev); + of_node_put(hw->node); + kfree(hw); + } +} diff --git a/drivers/net/ethernet/mediatek/mtk_wed.h b/drivers/net/ethernet/mediatek/mtk_wed.h new file mode 100644 index 000000000..ae420ca01 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed.h @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_WED_PRIV_H +#define __MTK_WED_PRIV_H + +#include <linux/soc/mediatek/mtk_wed.h> +#include <linux/debugfs.h> +#include <linux/regmap.h> +#include <linux/netdevice.h> + +struct mtk_eth; + +struct mtk_wed_hw { + struct device_node *node; + struct mtk_eth *eth; + struct regmap *regs; + struct regmap *hifsys; + struct device *dev; + void __iomem *wdma; + phys_addr_t wdma_phy; + struct regmap *mirror; + struct dentry *debugfs_dir; + struct mtk_wed_device *wed_dev; + u32 debugfs_reg; + u32 num_flows; + u8 version; + char dirname[5]; + int irq; + int index; +}; + +struct mtk_wdma_info { + u8 wdma_idx; + u8 queue; + u16 wcid; + u8 bss; +}; + +#ifdef CONFIG_NET_MEDIATEK_SOC_WED +static inline void +wed_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + regmap_write(dev->hw->regs, reg, val); +} + +static inline u32 +wed_r32(struct mtk_wed_device *dev, u32 reg) +{ + unsigned int val; + + regmap_read(dev->hw->regs, reg, &val); + + return val; +} + +static inline void +wdma_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + writel(val, dev->hw->wdma + reg); +} + +static inline u32 +wdma_r32(struct mtk_wed_device *dev, u32 reg) +{ + return readl(dev->hw->wdma + reg); +} + +static inline u32 +wpdma_tx_r32(struct mtk_wed_device *dev, int ring, 
u32 reg) +{ + if (!dev->tx_ring[ring].wpdma) + return 0; + + return readl(dev->tx_ring[ring].wpdma + reg); +} + +static inline void +wpdma_tx_w32(struct mtk_wed_device *dev, int ring, u32 reg, u32 val) +{ + if (!dev->tx_ring[ring].wpdma) + return; + + writel(val, dev->tx_ring[ring].wpdma + reg); +} + +static inline u32 +wpdma_txfree_r32(struct mtk_wed_device *dev, u32 reg) +{ + if (!dev->txfree_ring.wpdma) + return 0; + + return readl(dev->txfree_ring.wpdma + reg); +} + +static inline void +wpdma_txfree_w32(struct mtk_wed_device *dev, u32 reg, u32 val) +{ + if (!dev->txfree_ring.wpdma) + return; + + writel(val, dev->txfree_ring.wpdma + reg); +} + +void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, phys_addr_t wdma_phy, + int index); +void mtk_wed_exit(void); +int mtk_wed_flow_add(int index); +void mtk_wed_flow_remove(int index); +#else +static inline void +mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, + void __iomem *wdma, phys_addr_t wdma_phy, + int index) +{ +} +static inline void +mtk_wed_exit(void) +{ +} +static inline int mtk_wed_flow_add(int index) +{ + return -EINVAL; +} +static inline void mtk_wed_flow_remove(int index) +{ +} +#endif + +#ifdef CONFIG_DEBUG_FS +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw); +#else +static inline void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) +{ +} +#endif + +#endif diff --git a/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c new file mode 100644 index 000000000..f420f187e --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_debugfs.c @@ -0,0 +1,178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2021 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/seq_file.h> +#include "mtk_wed.h" +#include "mtk_wed_regs.h" + +struct reg_dump { + const char *name; + u16 offset; + u8 type; + u8 base; +}; + +enum { + DUMP_TYPE_STRING, + DUMP_TYPE_WED, + DUMP_TYPE_WDMA, + DUMP_TYPE_WPDMA_TX, + DUMP_TYPE_WPDMA_TXFREE, +}; + +#define DUMP_STR(_str) { _str, 0, DUMP_TYPE_STRING } +#define DUMP_REG(_reg, ...) { #_reg, MTK_##_reg, __VA_ARGS__ } +#define DUMP_RING(_prefix, _base, ...) \ + { _prefix " BASE", _base, __VA_ARGS__ }, \ + { _prefix " CNT", _base + 0x4, __VA_ARGS__ }, \ + { _prefix " CIDX", _base + 0x8, __VA_ARGS__ }, \ + { _prefix " DIDX", _base + 0xc, __VA_ARGS__ } + +#define DUMP_WED(_reg) DUMP_REG(_reg, DUMP_TYPE_WED) +#define DUMP_WED_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WED) + +#define DUMP_WDMA(_reg) DUMP_REG(_reg, DUMP_TYPE_WDMA) +#define DUMP_WDMA_RING(_base) DUMP_RING(#_base, MTK_##_base, DUMP_TYPE_WDMA) + +#define DUMP_WPDMA_TX_RING(_n) DUMP_RING("WPDMA_TX" #_n, 0, DUMP_TYPE_WPDMA_TX, _n) +#define DUMP_WPDMA_TXFREE_RING DUMP_RING("WPDMA_RX1", 0, DUMP_TYPE_WPDMA_TXFREE) + +static void +print_reg_val(struct seq_file *s, const char *name, u32 val) +{ + seq_printf(s, "%-32s %08x\n", name, val); +} + +static void +dump_wed_regs(struct seq_file *s, struct mtk_wed_device *dev, + const struct reg_dump *regs, int n_regs) +{ + const struct reg_dump *cur; + u32 val; + + for (cur = regs; cur < ®s[n_regs]; cur++) { + switch (cur->type) { + case DUMP_TYPE_STRING: + seq_printf(s, "%s======== %s:\n", + cur > regs ? 
"\n" : "", + cur->name); + continue; + case DUMP_TYPE_WED: + val = wed_r32(dev, cur->offset); + break; + case DUMP_TYPE_WDMA: + val = wdma_r32(dev, cur->offset); + break; + case DUMP_TYPE_WPDMA_TX: + val = wpdma_tx_r32(dev, cur->base, cur->offset); + break; + case DUMP_TYPE_WPDMA_TXFREE: + val = wpdma_txfree_r32(dev, cur->offset); + break; + } + print_reg_val(s, cur->name, val); + } +} + + +static int +wed_txinfo_show(struct seq_file *s, void *data) +{ + static const struct reg_dump regs[] = { + DUMP_STR("WED TX"), + DUMP_WED(WED_TX_MIB(0)), + DUMP_WED_RING(WED_RING_TX(0)), + + DUMP_WED(WED_TX_MIB(1)), + DUMP_WED_RING(WED_RING_TX(1)), + + DUMP_STR("WPDMA TX"), + DUMP_WED(WED_WPDMA_TX_MIB(0)), + DUMP_WED_RING(WED_WPDMA_RING_TX(0)), + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(0)), + + DUMP_WED(WED_WPDMA_TX_MIB(1)), + DUMP_WED_RING(WED_WPDMA_RING_TX(1)), + DUMP_WED(WED_WPDMA_TX_COHERENT_MIB(1)), + + DUMP_STR("WPDMA TX"), + DUMP_WPDMA_TX_RING(0), + DUMP_WPDMA_TX_RING(1), + + DUMP_STR("WED WDMA RX"), + DUMP_WED(WED_WDMA_RX_MIB(0)), + DUMP_WED_RING(WED_WDMA_RING_RX(0)), + DUMP_WED(WED_WDMA_RX_THRES(0)), + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(0)), + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(0)), + + DUMP_WED(WED_WDMA_RX_MIB(1)), + DUMP_WED_RING(WED_WDMA_RING_RX(1)), + DUMP_WED(WED_WDMA_RX_THRES(1)), + DUMP_WED(WED_WDMA_RX_RECYCLE_MIB(1)), + DUMP_WED(WED_WDMA_RX_PROCESSED_MIB(1)), + + DUMP_STR("WDMA RX"), + DUMP_WDMA(WDMA_GLO_CFG), + DUMP_WDMA_RING(WDMA_RING_RX(0)), + DUMP_WDMA_RING(WDMA_RING_RX(1)), + + DUMP_STR("TX FREE"), + DUMP_WED(WED_RX_MIB(0)), + }; + struct mtk_wed_hw *hw = s->private; + struct mtk_wed_device *dev = hw->wed_dev; + + if (!dev) + return 0; + + dump_wed_regs(s, dev, regs, ARRAY_SIZE(regs)); + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(wed_txinfo); + + +static int +mtk_wed_reg_set(void *data, u64 val) +{ + struct mtk_wed_hw *hw = data; + + regmap_write(hw->regs, hw->debugfs_reg, val); + + return 0; +} + +static int +mtk_wed_reg_get(void *data, u64 *val) +{ + struct mtk_wed_hw *hw = data; + unsigned int regval; + int ret; + + ret = regmap_read(hw->regs, hw->debugfs_reg, ®val); + if (ret) + return ret; + + *val = regval; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mtk_wed_reg_get, mtk_wed_reg_set, + "0x%08llx\n"); + +void mtk_wed_hw_add_debugfs(struct mtk_wed_hw *hw) +{ + struct dentry *dir; + + snprintf(hw->dirname, sizeof(hw->dirname), "wed%d", hw->index); + dir = debugfs_create_dir(hw->dirname, NULL); + if (!dir) + return; + + hw->debugfs_dir = dir; + debugfs_create_u32("regidx", 0600, dir, &hw->debugfs_reg); + debugfs_create_file_unsafe("regval", 0600, dir, hw, &fops_regval); + debugfs_create_file_unsafe("txinfo", 0400, dir, hw, &wed_txinfo_fops); +} diff --git a/drivers/net/ethernet/mediatek/mtk_wed_ops.c b/drivers/net/ethernet/mediatek/mtk_wed_ops.c new file mode 100644 index 000000000..a5d9d8a5b --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_ops.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#include <linux/kernel.h> +#include <linux/soc/mediatek/mtk_wed.h> + +const struct mtk_wed_ops __rcu *mtk_soc_wed_ops; +EXPORT_SYMBOL_GPL(mtk_soc_wed_ops); diff --git a/drivers/net/ethernet/mediatek/mtk_wed_regs.h b/drivers/net/ethernet/mediatek/mtk_wed_regs.h new file mode 100644 index 000000000..14cd44f81 --- /dev/null +++ b/drivers/net/ethernet/mediatek/mtk_wed_regs.h @@ -0,0 +1,332 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (C) 2020 Felix Fietkau <nbd@nbd.name> */ + +#ifndef __MTK_WED_REGS_H 
+#define __MTK_WED_REGS_H + +#define MTK_WDMA_DESC_CTRL_LEN1 GENMASK(14, 0) +#define MTK_WDMA_DESC_CTRL_LEN1_V2 GENMASK(13, 0) +#define MTK_WDMA_DESC_CTRL_LAST_SEG1 BIT(15) +#define MTK_WDMA_DESC_CTRL_BURST BIT(16) +#define MTK_WDMA_DESC_CTRL_LEN0 GENMASK(29, 16) +#define MTK_WDMA_DESC_CTRL_LAST_SEG0 BIT(30) +#define MTK_WDMA_DESC_CTRL_DMA_DONE BIT(31) + +struct mtk_wdma_desc { + __le32 buf0; + __le32 ctrl; + __le32 buf1; + __le32 info; +} __packed __aligned(4); + +#define MTK_WED_RESET 0x008 +#define MTK_WED_RESET_TX_BM BIT(0) +#define MTK_WED_RESET_TX_FREE_AGENT BIT(4) +#define MTK_WED_RESET_WPDMA_TX_DRV BIT(8) +#define MTK_WED_RESET_WPDMA_RX_DRV BIT(9) +#define MTK_WED_RESET_WPDMA_INT_AGENT BIT(11) +#define MTK_WED_RESET_WED_TX_DMA BIT(12) +#define MTK_WED_RESET_WDMA_RX_DRV BIT(17) +#define MTK_WED_RESET_WDMA_INT_AGENT BIT(19) +#define MTK_WED_RESET_WED BIT(31) + +#define MTK_WED_CTRL 0x00c +#define MTK_WED_CTRL_WPDMA_INT_AGENT_EN BIT(0) +#define MTK_WED_CTRL_WPDMA_INT_AGENT_BUSY BIT(1) +#define MTK_WED_CTRL_WDMA_INT_AGENT_EN BIT(2) +#define MTK_WED_CTRL_WDMA_INT_AGENT_BUSY BIT(3) +#define MTK_WED_CTRL_WED_TX_BM_EN BIT(8) +#define MTK_WED_CTRL_WED_TX_BM_BUSY BIT(9) +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_EN BIT(10) +#define MTK_WED_CTRL_WED_TX_FREE_AGENT_BUSY BIT(11) +#define MTK_WED_CTRL_RESERVE_EN BIT(12) +#define MTK_WED_CTRL_RESERVE_BUSY BIT(13) +#define MTK_WED_CTRL_FINAL_DIDX_READ BIT(24) +#define MTK_WED_CTRL_ETH_DMAD_FMT BIT(25) +#define MTK_WED_CTRL_MIB_READ_CLEAR BIT(28) + +#define MTK_WED_EXT_INT_STATUS 0x020 +#define MTK_WED_EXT_INT_STATUS_TF_LEN_ERR BIT(0) +#define MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD BIT(1) +#define MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID BIT(4) +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_LO_TH BIT(8) +#define MTK_WED_EXT_INT_STATUS_TX_FBUF_HI_TH BIT(9) +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH BIT(10) /* wed v2 */ +#define MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH BIT(11) /* wed v2 */ +#define MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR BIT(16) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR BIT(17) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT BIT(18) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN BIT(19) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_BM_DMAD_COHERENT BIT(20) +#define MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR BIT(21) +#define MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR BIT(22) +#define MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR BIT(23) +#define MTK_WED_EXT_INT_STATUS_RX_DRV_DMA_RECYCLE BIT(24) +#define MTK_WED_EXT_INT_STATUS_ERROR_MASK (MTK_WED_EXT_INT_STATUS_TF_LEN_ERR | \ + MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD | \ + MTK_WED_EXT_INT_STATUS_TKID_TITO_INVALID | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_R_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_W_RESP_ERR | \ + MTK_WED_EXT_INT_STATUS_RX_DRV_INIT_WDMA_EN | \ + MTK_WED_EXT_INT_STATUS_TX_DMA_R_RESP_ERR) + +#define MTK_WED_EXT_INT_MASK 0x028 + +#define MTK_WED_STATUS 0x060 +#define MTK_WED_STATUS_TX GENMASK(15, 8) + +#define MTK_WED_TX_BM_CTRL 0x080 +#define MTK_WED_TX_BM_CTRL_VLD_GRP_NUM GENMASK(6, 0) +#define MTK_WED_TX_BM_CTRL_RSV_GRP_NUM GENMASK(22, 16) +#define MTK_WED_TX_BM_CTRL_PAUSE BIT(28) + +#define MTK_WED_TX_BM_BASE 0x084 + +#define MTK_WED_TX_BM_TKID 0x088 +#define MTK_WED_TX_BM_TKID_V2 0x0c8 +#define MTK_WED_TX_BM_TKID_START GENMASK(15, 0) +#define MTK_WED_TX_BM_TKID_END GENMASK(31, 16) + +#define MTK_WED_TX_BM_BUF_LEN 0x08c + +#define MTK_WED_TX_BM_INTF 0x09c +#define MTK_WED_TX_BM_INTF_TKID GENMASK(15, 0) +#define MTK_WED_TX_BM_INTF_TKFIFO_FDEP GENMASK(23, 16) +#define 
MTK_WED_TX_BM_INTF_TKID_VALID BIT(28) +#define MTK_WED_TX_BM_INTF_TKID_READ BIT(29) + +#define MTK_WED_TX_BM_DYN_THR 0x0a0 +#define MTK_WED_TX_BM_DYN_THR_LO GENMASK(6, 0) +#define MTK_WED_TX_BM_DYN_THR_LO_V2 GENMASK(8, 0) +#define MTK_WED_TX_BM_DYN_THR_HI GENMASK(22, 16) +#define MTK_WED_TX_BM_DYN_THR_HI_V2 GENMASK(24, 16) + +#define MTK_WED_TX_TKID_CTRL 0x0c0 +#define MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM GENMASK(6, 0) +#define MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM GENMASK(22, 16) +#define MTK_WED_TX_TKID_CTRL_PAUSE BIT(28) + +#define MTK_WED_TX_TKID_DYN_THR 0x0e0 +#define MTK_WED_TX_TKID_DYN_THR_LO GENMASK(6, 0) +#define MTK_WED_TX_TKID_DYN_THR_HI GENMASK(22, 16) + +#define MTK_WED_TXP_DW0 0x120 +#define MTK_WED_TXP_DW1 0x124 +#define MTK_WED_WPDMA_WRITE_TXP GENMASK(31, 16) +#define MTK_WED_TXDP_CTRL 0x130 +#define MTK_WED_TXDP_DW9_OVERWR BIT(9) +#define MTK_WED_RX_BM_TKID_MIB 0x1cc + +#define MTK_WED_INT_STATUS 0x200 +#define MTK_WED_INT_MASK 0x204 + +#define MTK_WED_GLO_CFG 0x208 +#define MTK_WED_GLO_CFG_TX_DMA_EN BIT(0) +#define MTK_WED_GLO_CFG_TX_DMA_BUSY BIT(1) +#define MTK_WED_GLO_CFG_RX_DMA_EN BIT(2) +#define MTK_WED_GLO_CFG_RX_DMA_BUSY BIT(3) +#define MTK_WED_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) +#define MTK_WED_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_GLO_CFG_BIG_ENDIAN BIT(7) +#define MTK_WED_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) +#define MTK_WED_GLO_CFG_TX_BT_SIZE_LO BIT(9) +#define MTK_WED_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MTK_WED_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MTK_WED_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) +#define MTK_WED_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) +#define MTK_WED_GLO_CFG_SW_RESET BIT(24) +#define MTK_WED_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MTK_WED_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MTK_WED_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MTK_WED_GLO_CFG_BYTE_SWAP BIT(29) +#define MTK_WED_GLO_CFG_RX_2B_OFFSET BIT(31) + +#define MTK_WED_RESET_IDX 0x20c +#define MTK_WED_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WED_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WED_TX_MIB(_n) (0x2a0 + (_n) * 4) +#define MTK_WED_RX_MIB(_n) (0x2e0 + (_n) * 4) + +#define MTK_WED_RING_TX(_n) (0x300 + (_n) * 0x10) + +#define MTK_WED_RING_RX(_n) (0x400 + (_n) * 0x10) + +#define MTK_WED_WPDMA_INT_TRIGGER 0x504 +#define MTK_WED_WPDMA_INT_TRIGGER_RX_DONE BIT(1) +#define MTK_WED_WPDMA_INT_TRIGGER_TX_DONE GENMASK(5, 4) + +#define MTK_WED_WPDMA_GLO_CFG 0x508 +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY BIT(1) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN BIT(2) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY BIT(3) +#define MTK_WED_WPDMA_GLO_CFG_RX_BT_SIZE GENMASK(5, 4) +#define MTK_WED_WPDMA_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_WPDMA_GLO_CFG_BIG_ENDIAN BIT(7) +#define MTK_WED_WPDMA_GLO_CFG_DIS_BT_SIZE_ALIGN BIT(8) +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_LO BIT(9) +#define MTK_WED_WPDMA_GLO_CFG_MULTI_DMA_EN GENMASK(11, 10) +#define MTK_WED_WPDMA_GLO_CFG_FIFO_LITTLE_ENDIAN BIT(12) +#define MTK_WED_WPDMA_GLO_CFG_MI_DEPTH_RD GENMASK(21, 13) +#define MTK_WED_WPDMA_GLO_CFG_TX_BT_SIZE_HI GENMASK(23, 22) +#define MTK_WED_WPDMA_GLO_CFG_SW_RESET BIT(24) +#define MTK_WED_WPDMA_GLO_CFG_FIRST_TOKEN_ONLY BIT(26) +#define MTK_WED_WPDMA_GLO_CFG_OMIT_RX_INFO BIT(27) +#define MTK_WED_WPDMA_GLO_CFG_OMIT_TX_INFO BIT(28) +#define MTK_WED_WPDMA_GLO_CFG_BYTE_SWAP BIT(29) +#define MTK_WED_WPDMA_GLO_CFG_RX_2B_OFFSET BIT(31) + +/* CONFIG_MEDIATEK_NETSYS_V2 */ +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC BIT(4) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_PKT_PROC BIT(5) 
+#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC BIT(6) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_R1_CRX_SYNC BIT(7) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_VER GENMASK(18, 16) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNSUPPORT_FMT BIT(19) +#define MTK_WED_WPDMA_GLO_CFG_RX_DRV_UEVENT_PKT_FMT_CHK BIT(20) +#define MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR BIT(21) +#define MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP BIT(24) +#define MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV BIT(28) + +#define MTK_WED_WPDMA_RESET_IDX 0x50c +#define MTK_WED_WPDMA_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WED_WPDMA_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WED_WPDMA_CTRL 0x518 +#define MTK_WED_WPDMA_CTRL_SDL1_FIXED BIT(31) + +#define MTK_WED_WPDMA_INT_CTRL 0x520 +#define MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV BIT(21) +#define MTK_WED_WPDMA_INT_CTRL_SIG_SRC BIT(22) +#define MTK_WED_WPDMA_INT_CTRL_SRC_SEL GENMASK(17, 16) + +#define MTK_WED_WPDMA_INT_MASK 0x524 + +#define MTK_WED_WPDMA_INT_CTRL_TX 0x530 +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG GENMASK(6, 2) +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN BIT(8) +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR BIT(9) +#define MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG GENMASK(14, 10) + +#define MTK_WED_WPDMA_INT_CTRL_RX 0x534 + +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE 0x538 +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN BIT(0) +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR BIT(1) +#define MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG GENMASK(6, 2) + +#define MTK_WED_PCIE_CFG_BASE 0x560 + +#define MTK_WED_PCIE_CFG_BASE 0x560 +#define MTK_WED_PCIE_CFG_INTM 0x564 +#define MTK_WED_PCIE_CFG_MSIS 0x568 +#define MTK_WED_PCIE_INT_TRIGGER 0x570 +#define MTK_WED_PCIE_INT_TRIGGER_STATUS BIT(16) + +#define MTK_WED_PCIE_INT_CTRL 0x57c +#define MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA BIT(20) +#define MTK_WED_PCIE_INT_CTRL_SRC_SEL GENMASK(17, 16) +#define MTK_WED_PCIE_INT_CTRL_POLL_EN GENMASK(13, 12) + +#define MTK_WED_WPDMA_CFG_BASE 0x580 +#define MTK_WED_WPDMA_CFG_INT_MASK 0x584 +#define MTK_WED_WPDMA_CFG_TX 0x588 +#define MTK_WED_WPDMA_CFG_TX_FREE 0x58c + +#define MTK_WED_WPDMA_TX_MIB(_n) (0x5a0 + (_n) * 4) +#define MTK_WED_WPDMA_TX_COHERENT_MIB(_n) (0x5d0 + (_n) * 4) + +#define MTK_WED_WPDMA_RING_TX(_n) (0x600 + (_n) * 0x10) +#define MTK_WED_WPDMA_RING_RX(_n) (0x700 + (_n) * 0x10) +#define MTK_WED_WDMA_RING_RX(_n) (0x900 + (_n) * 0x10) +#define MTK_WED_WDMA_RX_THRES(_n) (0x940 + (_n) * 0x4) + +#define MTK_WED_WDMA_GLO_CFG 0xa04 +#define MTK_WED_WDMA_GLO_CFG_TX_DRV_EN BIT(0) +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_EN BIT(2) +#define MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY BIT(3) +#define MTK_WED_WDMA_GLO_CFG_BT_SIZE GENMASK(5, 4) +#define MTK_WED_WDMA_GLO_CFG_TX_WB_DDONE BIT(6) +#define MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE BIT(13) +#define MTK_WED_WDMA_GLO_CFG_WCOMPLETE_SEL BIT(16) +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_RXDMA_BYPASS BIT(17) +#define MTK_WED_WDMA_GLO_CFG_INIT_PHASE_BYPASS BIT(18) +#define MTK_WED_WDMA_GLO_CFG_FSM_RETURN_IDLE BIT(19) +#define MTK_WED_WDMA_GLO_CFG_WAIT_COHERENT BIT(20) +#define MTK_WED_WDMA_GLO_CFG_AXI_W_AFTER_AW BIT(21) +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY_SINGLE_W BIT(22) +#define MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY BIT(23) +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP BIT(24) +#define MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE BIT(25) +#define MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE BIT(26) +#define MTK_WED_WDMA_GLO_CFG_RXDRV_CLKGATE_BYPASS BIT(30) + +#define 
MTK_WED_WDMA_RESET_IDX 0xa08 +#define MTK_WED_WDMA_RESET_IDX_RX GENMASK(17, 16) +#define MTK_WED_WDMA_RESET_IDX_DRV GENMASK(25, 24) + +#define MTK_WED_WDMA_INT_CLR 0xa24 +#define MTK_WED_WDMA_INT_CLR_RX_DONE GENMASK(17, 16) + +#define MTK_WED_WDMA_INT_TRIGGER 0xa28 +#define MTK_WED_WDMA_INT_TRIGGER_RX_DONE GENMASK(17, 16) + +#define MTK_WED_WDMA_INT_CTRL 0xa2c +#define MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL GENMASK(17, 16) + +#define MTK_WED_WDMA_CFG_BASE 0xaa0 +#define MTK_WED_WDMA_OFFSET0 0xaa4 +#define MTK_WED_WDMA_OFFSET1 0xaa8 + +#define MTK_WED_WDMA_OFST0_GLO_INTS GENMASK(15, 0) +#define MTK_WED_WDMA_OFST0_GLO_CFG GENMASK(31, 16) +#define MTK_WED_WDMA_OFST1_TX_CTRL GENMASK(15, 0) +#define MTK_WED_WDMA_OFST1_RX_CTRL GENMASK(31, 16) + +#define MTK_WED_WDMA_RX_MIB(_n) (0xae0 + (_n) * 4) +#define MTK_WED_WDMA_RX_RECYCLE_MIB(_n) (0xae8 + (_n) * 4) +#define MTK_WED_WDMA_RX_PROCESSED_MIB(_n) (0xaf0 + (_n) * 4) + +#define MTK_WED_RING_OFS_BASE 0x00 +#define MTK_WED_RING_OFS_COUNT 0x04 +#define MTK_WED_RING_OFS_CPU_IDX 0x08 +#define MTK_WED_RING_OFS_DMA_IDX 0x0c + +#define MTK_WDMA_RING_TX(_n) (0x000 + (_n) * 0x10) +#define MTK_WDMA_RING_RX(_n) (0x100 + (_n) * 0x10) + +#define MTK_WDMA_GLO_CFG 0x204 +#define MTK_WDMA_GLO_CFG_TX_DMA_EN BIT(0) +#define MTK_WDMA_GLO_CFG_RX_DMA_EN BIT(2) +#define MTK_WDMA_GLO_CFG_RX_INFO3_PRERES BIT(26) +#define MTK_WDMA_GLO_CFG_RX_INFO2_PRERES BIT(27) +#define MTK_WDMA_GLO_CFG_RX_INFO1_PRERES BIT(28) + +#define MTK_WDMA_RESET_IDX 0x208 +#define MTK_WDMA_RESET_IDX_TX GENMASK(3, 0) +#define MTK_WDMA_RESET_IDX_RX GENMASK(17, 16) + +#define MTK_WDMA_INT_STATUS 0x220 + +#define MTK_WDMA_INT_MASK 0x228 +#define MTK_WDMA_INT_MASK_TX_DONE GENMASK(3, 0) +#define MTK_WDMA_INT_MASK_RX_DONE GENMASK(17, 16) +#define MTK_WDMA_INT_MASK_TX_DELAY BIT(28) +#define MTK_WDMA_INT_MASK_TX_COHERENT BIT(29) +#define MTK_WDMA_INT_MASK_RX_DELAY BIT(30) +#define MTK_WDMA_INT_MASK_RX_COHERENT BIT(31) + +#define MTK_WDMA_INT_GRP1 0x250 +#define MTK_WDMA_INT_GRP2 0x254 + +#define MTK_PCIE_MIRROR_MAP(n) ((n) ? 0x4 : 0x0) +#define MTK_PCIE_MIRROR_MAP_EN BIT(0) +#define MTK_PCIE_MIRROR_MAP_WED_ID BIT(1) + +/* DMA channel mapping */ +#define HIFSYS_DMA_AG_MAP 0x008 + +#endif |
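
For orientation, here is a minimal sketch of the consumer side of the ops table that mtk_wed_add_hw() publishes via rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops). In-tree WLAN drivers normally go through the mtk_wed_device_*() inline wrappers from <linux/soc/mediatek/mtk_wed.h> (not part of this diff); the sketch below calls the published ops directly, and the function name, parameters and error handling are assumptions for illustration only, as is the exact content of that header.

	#include <linux/rcupdate.h>
	#include <linux/soc/mediatek/mtk_wed.h>

	/* Illustrative only: attach to WED, redirect TX ring 0 and the txfree
	 * ring through WED, then start the engine with the WLAN driver's
	 * interrupt mask.
	 */
	static int wlan_bind_to_wed(struct mtk_wed_device *wed, u32 irq_mask,
				    void __iomem *pdma_tx0_regs,
				    void __iomem *txfree_regs)
	{
		const struct mtk_wed_ops *ops;
		int ret;

		rcu_read_lock();
		ops = rcu_dereference(mtk_soc_wed_ops);
		if (!ops) {
			rcu_read_unlock();
			return -ENODEV;
		}

		/* mtk_wed_attach() releases the RCU read lock itself, see
		 * __releases(RCU) above.
		 */
		ret = ops->attach(wed);
		if (ret)
			return ret;

		ret = ops->tx_ring_setup(wed, 0, pdma_tx0_regs);
		if (!ret)
			ret = ops->txfree_ring_setup(wed, txfree_regs);
		if (ret) {
			ops->detach(wed);
			return ret;
		}

		ops->start(wed, irq_mask);
		return 0;
	}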
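
The regidx/regval/txinfo files created by mtk_wed_hw_add_debugfs() above can be exercised from userspace once a WED instance is registered. Assuming debugfs is mounted at the usual /sys/kernel/debug, for example:

	echo 0x200 > /sys/kernel/debug/wed0/regidx    # select MTK_WED_INT_STATUS
	cat /sys/kernel/debug/wed0/regval             # read back the selected register (also writable)
	cat /sys/kernel/debug/wed0/txinfo             # dump WED/WPDMA/WDMA TX ring state

The directory name comes from hw->dirname ("wed%d"), regidx accepts a register offset in decimal or hex, and regval goes through fops_regval, i.e. regmap_read()/regmap_write() on hw->regs at that offset.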