path: root/drivers/net/ethernet/stmicro
Diffstat (limited to 'drivers/net/ethernet/stmicro')
-rw-r--r--  drivers/net/ethernet/stmicro/Kconfig | 23
-rw-r--r--  drivers/net/ethernet/stmicro/Makefile | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 277
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 41
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c | 257
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h | 29
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/chain_mode.c | 168
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/common.h | 568
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs.h | 186
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/descs_com.h | 121
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c | 155
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c | 513
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c | 100
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c | 319
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c | 400
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c | 195
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c | 1241
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h | 53
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 512
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c | 235
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c | 97
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c | 715
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c | 105
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c | 549
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c | 245
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c | 615
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c | 1938
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 536
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c | 436
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c | 545
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c | 1344
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c | 193
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 300
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100.h | 111
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000.h | 333
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 556
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c | 291
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c | 193
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 127
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4.h | 520
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 1334
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c | 585
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h | 147
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c | 577
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h | 238
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 237
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.c | 777
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac5.h | 164
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 166
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c | 292
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h | 474
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c | 1649
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c | 373
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c | 582
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h | 22
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/enh_desc.c | 475
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.c | 342
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/hwif.h | 642
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc.h | 130
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/mmc_core.c | 479
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/norm_desc.c | 326
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/ring_mode.c | 148
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 404
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 1202
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c | 224
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 7679
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 584
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 313
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h | 155
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 840
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h | 33
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c | 339
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h | 91
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c | 2049
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c | 1115
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c | 135
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h | 15
77 files changed, 40485 insertions, 0 deletions
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
new file mode 100644
index 000000000..cc136b4c9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -0,0 +1,23 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# STMicroelectronics device configuration
+#
+
+config NET_VENDOR_STMICRO
+ bool "STMicroelectronics devices"
+ default y
+ depends on HAS_IOMEM
+ help
+ If you have a network (Ethernet) card based on Synopsys Ethernet IP
+ Cores, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about STMicroelectronics cards. If you say Y, you will
+ be asked for your specific card in the following questions.
+
+if NET_VENDOR_STMICRO
+
+source "drivers/net/ethernet/stmicro/stmmac/Kconfig"
+
+endif # NET_VENDOR_STMICRO
diff --git a/drivers/net/ethernet/stmicro/Makefile b/drivers/net/ethernet/stmicro/Makefile
new file mode 100644
index 000000000..72fd1f6ab
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the STMicroelectronics device drivers.
+#
+
+obj-$(CONFIG_STMMAC_ETH) += stmmac/
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
new file mode 100644
index 000000000..58091ee2b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -0,0 +1,277 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config STMMAC_ETH
+ tristate "STMicroelectronics Multi-Gigabit Ethernet driver"
+ depends on HAS_IOMEM && HAS_DMA
+ depends on PTP_1588_CLOCK_OPTIONAL
+ select MII
+ select PCS_XPCS
+ select PAGE_POOL
+ select PHYLINK
+ select CRC32
+ select RESET_CONTROLLER
+ help
+ This is the driver for the Ethernet IPs built around a
+ Synopsys IP Core.
+
+if STMMAC_ETH
+
+config STMMAC_SELFTESTS
+ bool "Support for STMMAC Selftests"
+ depends on INET
+ depends on STMMAC_ETH
+ default n
+ help
+ This adds support for STMMAC selftests using ethtool. Enable this
+ feature if you are facing problems with your hardware and want to
+ submit the test results to the netdev mailing list.
+
+config STMMAC_PLATFORM
+ tristate "STMMAC Platform bus support"
+ depends on STMMAC_ETH
+ select MFD_SYSCON
+ default y
+ help
+ This selects the platform specific bus support for the stmmac driver.
+ This is the driver used on several SoCs:
+ STi, Allwinner, Amlogic Meson, Altera SOCFPGA.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
+if STMMAC_PLATFORM
+
+config DWMAC_DWC_QOS_ETH
+ tristate "Support for snps,dwc-qos-ethernet.txt DT binding."
+ select CRC32
+ select MII
+ depends on OF && HAS_DMA
+ help
+ Support for chips using the snps,dwc-qos-ethernet.txt DT binding.
+
+config DWMAC_GENERIC
+ tristate "Generic driver for DWMAC"
+ default STMMAC_PLATFORM
+ help
+ Generic DWMAC driver for platforms that don't require any
+ platform specific code to function, or that use platform
+ data for setup.
+
+config DWMAC_ANARION
+ tristate "Adaptrum Anarion GMAC support"
+ default ARC
+ depends on OF && (ARC || COMPILE_TEST)
+ help
+ Support for Adaptrum Anarion GMAC Ethernet controller.
+
+ This selects the Anarion SoC glue layer support for the stmmac driver.
+
+config DWMAC_INGENIC
+ tristate "Ingenic MAC support"
+ default MACH_INGENIC
+ depends on OF && HAS_IOMEM && (MACH_INGENIC || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on Ingenic SoCs.
+
+ This selects the Ingenic SoCs glue layer support for the stmmac
+ device driver. This driver is used for the MAC ethernet
+ controller on Ingenic SoCs.
+
+config DWMAC_IPQ806X
+ tristate "QCA IPQ806x DWMAC support"
+ default ARCH_QCOM
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for QCA IPQ806X DWMAC Ethernet.
+
+ This selects the IPQ806x SoC glue layer support for the stmmac
+ device driver. This driver does not use any of the hardware
+ acceleration features available on this SoC. Network devices
+ will behave like standard non-accelerated ethernet interfaces.
+
+ Select the QCOM_SOCINFO config flag to enable specific dwmac
+ fixup based on the ipq806x SoC revision.
+
+config DWMAC_LPC18XX
+ tristate "NXP LPC18xx/43xx DWMAC support"
+ default ARCH_LPC18XX
+ depends on OF && (ARCH_LPC18XX || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for NXP LPC18xx/43xx DWMAC Ethernet.
+
+config DWMAC_MEDIATEK
+ tristate "MediaTek MT27xx GMAC support"
+ depends on OF && (ARCH_MEDIATEK || COMPILE_TEST)
+ help
+ Support for MediaTek GMAC Ethernet controller.
+
+ This selects the MT2712 SoC support for the stmmac driver.
+
+config DWMAC_MESON
+ tristate "Amlogic Meson dwmac support"
+ default ARCH_MESON
+ depends on OF && COMMON_CLK && (ARCH_MESON || COMPILE_TEST)
+ help
+ Support for Ethernet controller on Amlogic Meson SoCs.
+
+ This selects the Amlogic Meson SoC glue layer support for
+ the stmmac device driver. This driver is used for Meson6,
+ Meson8, Meson8b and GXBB SoCs.
+
+config DWMAC_OXNAS
+ tristate "Oxford Semiconductor OXNAS dwmac support"
+ default ARCH_OXNAS
+ depends on OF && COMMON_CLK && (ARCH_OXNAS || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for Ethernet controller on Oxford Semiconductor OXNAS SoCs.
+
+ This selects the Oxford Semiconductor OXNAS SoC glue layer support for
+ the stmmac device driver. This driver is used for OX820.
+
+config DWMAC_QCOM_ETHQOS
+ tristate "Qualcomm ETHQOS support"
+ default ARCH_QCOM
+ depends on OF && (ARCH_QCOM || COMPILE_TEST)
+ help
+ Support for the Qualcomm ETHQOS core.
+
+ This selects the Qualcomm ETHQOS glue layer support for the
+ stmmac device driver.
+
+config DWMAC_ROCKCHIP
+ tristate "Rockchip dwmac support"
+ default ARCH_ROCKCHIP
+ depends on OF && (ARCH_ROCKCHIP || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for Ethernet controller on Rockchip RK3288 SoC.
+
+ This selects the Rockchip RK3288 SoC glue layer support for
+ the stmmac device driver.
+
+config DWMAC_SOCFPGA
+ tristate "SOCFPGA dwmac support"
+ default ARCH_INTEL_SOCFPGA
+ depends on OF && (ARCH_INTEL_SOCFPGA || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for the ethernet controller on Altera SOCFPGA.
+
+ This selects the Altera SOCFPGA SoC glue layer support
+ for the stmmac device driver. This driver is used for
+ arria5 and cyclone5 FPGA SoCs.
+
+config DWMAC_STI
+ tristate "STi GMAC support"
+ default ARCH_STI
+ depends on OF && (ARCH_STI || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on STi SOCs.
+
+ This selects the STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC ethernet
+ controller on STi series SoCs.
+
+config DWMAC_STM32
+ tristate "STM32 DWMAC support"
+ default ARCH_STM32
+ depends on OF && HAS_IOMEM && (ARCH_STM32 || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on STM32 SOCs.
+
+ This selects the STM32 SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC ethernet
+ controller on STM32 series SoCs.
+
+config DWMAC_SUNXI
+ tristate "Allwinner GMAC support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ help
+ Support for Allwinner A20/A31 GMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for A20/A31
+ GMAC ethernet controller.
+
+config DWMAC_SUN8I
+ tristate "Allwinner sun8i GMAC support"
+ default ARCH_SUNXI
+ depends on OF && (ARCH_SUNXI || COMPILE_TEST)
+ select MDIO_BUS_MUX
+ help
+ Support for Allwinner H3/A83T/A64 EMAC ethernet controllers.
+
+ This selects Allwinner SoC glue layer support for the
+ stmmac device driver. This driver is used for H3/A83T/A64
+ EMAC ethernet controller.
+
+config DWMAC_IMX8
+ tristate "NXP IMX8 DWMAC support"
+ default ARCH_MXC
+ depends on OF && (ARCH_MXC || COMPILE_TEST)
+ select MFD_SYSCON
+ help
+ Support for ethernet controller on NXP i.MX8 SOCs.
+
+ This selects the NXP SoC glue layer support for the stmmac
+ device driver. This driver is used for i.MX8 series SoCs such
+ as the i.MX8MP and i.MX8DXL GMAC ethernet controllers.
+
+config DWMAC_INTEL_PLAT
+ tristate "Intel dwmac support"
+ depends on OF && COMMON_CLK
+ depends on STMMAC_ETH
+ help
+ Support for ethernet controllers on Intel SoCs.
+
+ This selects the Intel platform specific glue layer support for
+ the stmmac device driver. This driver is used for the Intel Keem Bay
+ SoC.
+
+config DWMAC_VISCONTI
+ tristate "Toshiba Visconti DWMAC support"
+ default ARCH_VISCONTI
+ depends on OF && COMMON_CLK && (ARCH_VISCONTI || COMPILE_TEST)
+ help
+ Support for ethernet controller on Visconti SoCs.
+
+endif
+
+config DWMAC_INTEL
+ tristate "Intel GMAC support"
+ default X86
+ depends on X86 && STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ help
+ This selects the Intel platform specific bus support for the
+ stmmac driver. This driver is used for Intel Quark/EHL/TGL.
+
+config DWMAC_LOONGSON
+ tristate "Loongson PCI DWMAC support"
+ default MACH_LOONGSON64
+ depends on (MACH_LOONGSON64 || COMPILE_TEST) && STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ help
+ This selects the Loongson PCI bus support for the stmmac driver,
+ which supports the ethernet controller on the Loongson-2K1000 SoC
+ and the LS7A1000 bridge.
+
+config STMMAC_PCI
+ tristate "STMMAC PCI bus support"
+ depends on STMMAC_ETH && PCI
+ depends on COMMON_CLK
+ help
+ This selects the PCI bus support for the stmmac driver.
+ This driver was tested on the Xilinx XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+endif
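
For a quick orientation, a board that only needs the generic platform glue could enable the driver with a config fragment along these lines (an illustrative selection of the symbols defined above, not a tested configuration):

    CONFIG_NET_VENDOR_STMICRO=y
    CONFIG_STMMAC_ETH=m
    CONFIG_STMMAC_PLATFORM=m
    CONFIG_DWMAC_GENERIC=m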
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
new file mode 100644
index 000000000..d4e12e9ac
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_STMMAC_ETH) += stmmac.o
+stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
+ chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
+ dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
+ mmc_core.o stmmac_hwtstamp.o stmmac_ptp.o dwmac4_descs.o \
+ dwmac4_dma.o dwmac4_lib.o dwmac4_core.o dwmac5.o hwif.o \
+ stmmac_tc.o dwxgmac2_core.o dwxgmac2_dma.o dwxgmac2_descs.o \
+ stmmac_xdp.o \
+ $(stmmac-y)
+
+stmmac-$(CONFIG_STMMAC_SELFTESTS) += stmmac_selftests.o
+
+# Ordering matters. Generic driver must be last.
+obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o
+obj-$(CONFIG_DWMAC_INGENIC) += dwmac-ingenic.o
+obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
+obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
+obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o
+obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o
+obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o
+obj-$(CONFIG_DWMAC_QCOM_ETHQOS) += dwmac-qcom-ethqos.o
+obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
+obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o
+obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o
+obj-$(CONFIG_DWMAC_STM32) += dwmac-stm32.o
+obj-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+obj-$(CONFIG_DWMAC_SUN8I) += dwmac-sun8i.o
+obj-$(CONFIG_DWMAC_DWC_QOS_ETH) += dwmac-dwc-qos-eth.o
+obj-$(CONFIG_DWMAC_INTEL_PLAT) += dwmac-intel-plat.o
+obj-$(CONFIG_DWMAC_GENERIC) += dwmac-generic.o
+obj-$(CONFIG_DWMAC_IMX8) += dwmac-imx.o
+obj-$(CONFIG_DWMAC_VISCONTI) += dwmac-visconti.o
+stmmac-platform-objs:= stmmac_platform.o
+dwmac-altr-socfpga-objs := altr_tse_pcs.o dwmac-socfpga.o
+
+obj-$(CONFIG_STMMAC_PCI) += stmmac-pci.o
+obj-$(CONFIG_DWMAC_INTEL) += dwmac-intel.o
+obj-$(CONFIG_DWMAC_LOONGSON) += dwmac-loongson.o
+stmmac-pci-objs:= stmmac_pci.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
new file mode 100644
index 000000000..00f6d347e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * Author: Tien Hock Loh <thloh@altera.com>
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+#include "altr_tse_pcs.h"
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII BIT(1)
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII BIT(2)
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK GENMASK(1, 0)
+
+#define TSE_PCS_CONTROL_AN_EN_MASK BIT(12)
+#define TSE_PCS_CONTROL_REG 0x00
+#define TSE_PCS_CONTROL_RESTART_AN_MASK BIT(9)
+#define TSE_PCS_CTRL_AUTONEG_SGMII 0x1140
+#define TSE_PCS_IF_MODE_REG 0x28
+#define TSE_PCS_LINK_TIMER_0_REG 0x24
+#define TSE_PCS_LINK_TIMER_1_REG 0x26
+#define TSE_PCS_SIZE 0x40
+#define TSE_PCS_STATUS_AN_COMPLETED_MASK BIT(5)
+#define TSE_PCS_STATUS_LINK_MASK 0x0004
+#define TSE_PCS_STATUS_REG 0x02
+#define TSE_PCS_SGMII_SPEED_1000 BIT(3)
+#define TSE_PCS_SGMII_SPEED_100 BIT(2)
+#define TSE_PCS_SGMII_SPEED_10 0x0
+#define TSE_PCS_SW_RST_MASK 0x8000
+#define TSE_PCS_PARTNER_ABILITY_REG 0x0A
+#define TSE_PCS_PARTNER_DUPLEX_FULL 0x1000
+#define TSE_PCS_PARTNER_DUPLEX_HALF 0x0000
+#define TSE_PCS_PARTNER_DUPLEX_MASK 0x1000
+#define TSE_PCS_PARTNER_SPEED_MASK GENMASK(11, 10)
+#define TSE_PCS_PARTNER_SPEED_1000 BIT(11)
+#define TSE_PCS_PARTNER_SPEED_100 BIT(10)
+#define TSE_PCS_PARTNER_SPEED_10 0x0000
+#define TSE_PCS_SGMII_SPEED_MASK GENMASK(3, 2)
+#define TSE_PCS_SGMII_LINK_TIMER_0 0x0D40
+#define TSE_PCS_SGMII_LINK_TIMER_1 0x0003
+#define TSE_PCS_SW_RESET_TIMEOUT 100
+#define TSE_PCS_USE_SGMII_AN_MASK BIT(1)
+#define TSE_PCS_USE_SGMII_ENA BIT(0)
+#define TSE_PCS_IF_USE_SGMII 0x03
+
+#define AUTONEGO_LINK_TIMER 20
+
+static int tse_pcs_reset(void __iomem *base, struct tse_pcs *pcs)
+{
+ int counter = 0;
+ u16 val;
+
+ val = readw(base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_SW_RST_MASK;
+ writew(val, base + TSE_PCS_CONTROL_REG);
+
+ while (counter < TSE_PCS_SW_RESET_TIMEOUT) {
+ val = readw(base + TSE_PCS_CONTROL_REG);
+ val &= TSE_PCS_SW_RST_MASK;
+ if (val == 0)
+ break;
+ counter++;
+ udelay(1);
+ }
+ if (counter >= TSE_PCS_SW_RESET_TIMEOUT) {
+ dev_err(pcs->dev, "PCS could not get out of sw reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs)
+{
+ int ret = 0;
+
+ writew(TSE_PCS_IF_USE_SGMII, base + TSE_PCS_IF_MODE_REG);
+
+ writew(TSE_PCS_CTRL_AUTONEG_SGMII, base + TSE_PCS_CONTROL_REG);
+
+ writew(TSE_PCS_SGMII_LINK_TIMER_0, base + TSE_PCS_LINK_TIMER_0_REG);
+ writew(TSE_PCS_SGMII_LINK_TIMER_1, base + TSE_PCS_LINK_TIMER_1_REG);
+
+ ret = tse_pcs_reset(base, pcs);
+ if (ret == 0)
+ writew(SGMII_ADAPTER_ENABLE,
+ pcs->sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ return ret;
+}
+
+static void pcs_link_timer_callback(struct tse_pcs *pcs)
+{
+ u16 val = 0;
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+
+ val = readw(tse_pcs_base + TSE_PCS_STATUS_REG);
+ val &= TSE_PCS_STATUS_LINK_MASK;
+
+ if (val != 0) {
+ dev_dbg(pcs->dev, "Adapter: Link is established\n");
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ } else {
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
+
+static void auto_nego_timer_callback(struct tse_pcs *pcs)
+{
+ u16 val = 0;
+ u16 speed = 0;
+ u16 duplex = 0;
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ void __iomem *sgmii_adapter_base = pcs->sgmii_adapter_base;
+
+ val = readw(tse_pcs_base + TSE_PCS_STATUS_REG);
+ val &= TSE_PCS_STATUS_AN_COMPLETED_MASK;
+
+ if (val != 0) {
+ dev_dbg(pcs->dev, "Adapter: Auto Negotiation is completed\n");
+ val = readw(tse_pcs_base + TSE_PCS_PARTNER_ABILITY_REG);
+ speed = val & TSE_PCS_PARTNER_SPEED_MASK;
+ duplex = val & TSE_PCS_PARTNER_DUPLEX_MASK;
+
+ if (speed == TSE_PCS_PARTNER_SPEED_10 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 10/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_100 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 100/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_1000 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_FULL)
+ dev_dbg(pcs->dev,
+ "Adapter: Link Partner is Up - 1000/Full\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_10 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_100 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else if (speed == TSE_PCS_PARTNER_SPEED_1000 &&
+ duplex == TSE_PCS_PARTNER_DUPLEX_HALF)
+ dev_err(pcs->dev,
+ "Adapter does not support Half Duplex\n");
+ else
+ dev_err(pcs->dev,
+ "Adapter: Invalid Partner Speed and Duplex\n");
+
+ if (duplex == TSE_PCS_PARTNER_DUPLEX_FULL &&
+ (speed == TSE_PCS_PARTNER_SPEED_10 ||
+ speed == TSE_PCS_PARTNER_SPEED_100 ||
+ speed == TSE_PCS_PARTNER_SPEED_1000))
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ } else {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_RESTART_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
+
+static void aneg_link_timer_callback(struct timer_list *t)
+{
+ struct tse_pcs *pcs = from_timer(pcs, t, aneg_link_timer);
+
+ if (pcs->autoneg == AUTONEG_ENABLE)
+ auto_nego_timer_callback(pcs);
+ else if (pcs->autoneg == AUTONEG_DISABLE)
+ pcs_link_timer_callback(pcs);
+}
+
+void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ unsigned int speed)
+{
+ void __iomem *tse_pcs_base = pcs->tse_pcs_base;
+ u32 val;
+
+ pcs->autoneg = phy_dev->autoneg;
+
+ if (phy_dev->autoneg == AUTONEG_ENABLE) {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_AN_EN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val |= TSE_PCS_USE_SGMII_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val |= TSE_PCS_CONTROL_RESTART_AN_MASK;
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+
+ timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback,
+ 0);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ } else if (phy_dev->autoneg == AUTONEG_DISABLE) {
+ val = readw(tse_pcs_base + TSE_PCS_CONTROL_REG);
+ val &= ~TSE_PCS_CONTROL_AN_EN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_CONTROL_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val &= ~TSE_PCS_USE_SGMII_AN_MASK;
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ val = readw(tse_pcs_base + TSE_PCS_IF_MODE_REG);
+ val &= ~TSE_PCS_SGMII_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= TSE_PCS_SGMII_SPEED_1000;
+ break;
+ case 100:
+ val |= TSE_PCS_SGMII_SPEED_100;
+ break;
+ case 10:
+ val |= TSE_PCS_SGMII_SPEED_10;
+ break;
+ default:
+ return;
+ }
+ writew(val, tse_pcs_base + TSE_PCS_IF_MODE_REG);
+
+ tse_pcs_reset(tse_pcs_base, pcs);
+
+ timer_setup(&pcs->aneg_link_timer, aneg_link_timer_callback,
+ 0);
+ mod_timer(&pcs->aneg_link_timer, jiffies +
+ msecs_to_jiffies(AUTONEGO_LINK_TIMER));
+ }
+}
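
A rough sketch of how these exported helpers are consumed (in the kernel the real caller is the SOCFPGA glue layer, dwmac-socfpga.c; the "example_dwmac" structure and callback below are hypothetical stand-ins, not part of this patch):

    #include "altr_tse_pcs.h"

    /* Hypothetical glue-driver private data holding the PCS state. */
    struct example_dwmac {
            struct tse_pcs pcs;
            struct phy_device *phy_dev;
    };

    /* fix_mac_speed-style callback: forward the negotiated link speed
     * from the MAC glue layer to the TSE PCS so it can reprogram the
     * SGMII auto-negotiation/speed registers.
     */
    static void example_fix_mac_speed(void *priv, unsigned int speed)
    {
            struct example_dwmac *dwmac = priv;

            if (dwmac->pcs.sgmii_adapter_base)
                    tse_pcs_fix_mac_speed(&dwmac->pcs, dwmac->phy_dev, speed);
    }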
diff --git a/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
new file mode 100644
index 000000000..694ac25ef
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/altr_tse_pcs.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright Altera Corporation (C) 2016. All rights reserved.
+ *
+ * Author: Tien Hock Loh <thloh@altera.com>
+ */
+
+#ifndef __TSE_PCS_H__
+#define __TSE_PCS_H__
+
+#include <linux/phy.h>
+#include <linux/timer.h>
+
+#define SGMII_ADAPTER_CTRL_REG 0x00
+#define SGMII_ADAPTER_ENABLE 0x0000
+#define SGMII_ADAPTER_DISABLE 0x0001
+
+struct tse_pcs {
+ struct device *dev;
+ void __iomem *tse_pcs_base;
+ void __iomem *sgmii_adapter_base;
+ struct timer_list aneg_link_timer;
+ int autoneg;
+};
+
+int tse_pcs_init(void __iomem *base, struct tse_pcs *pcs);
+void tse_pcs_fix_mac_speed(struct tse_pcs *pcs, struct phy_device *phy_dev,
+ unsigned int speed);
+
+#endif /* __TSE_PCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
new file mode 100644
index 000000000..2e8744ac6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -0,0 +1,168 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ Specialised functions for managing Chained mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors when the DMA is configured to work in chained or
+ ring mode.
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
+static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
+ unsigned int nopaged_len = skb_headlen(skb);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->cur_tx;
+ unsigned int bmax, des2;
+ unsigned int i = 1, len;
+ struct dma_desc *desc;
+
+ desc = tx_q->dma_tx + entry;
+
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ len = nopaged_len - bmax;
+
+ des2 = dma_map_single(priv->device, skb->data,
+ bmax, DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
+ return -1;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
+ /* do not close the descriptor and do not set own bit */
+ stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum, STMMAC_CHAIN_MODE,
+ 0, false, skb->len);
+
+ while (len != 0) {
+ tx_q->tx_skbuff[entry] = NULL;
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ desc = tx_q->dma_tx + entry;
+
+ if (len > bmax) {
+ des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i),
+ bmax, DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
+ return -1;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
+ stmmac_prepare_tx_desc(priv, desc, 0, bmax, csum,
+ STMMAC_CHAIN_MODE, 1, false, skb->len);
+ len -= bmax;
+ i++;
+ } else {
+ des2 = dma_map_single(priv->device,
+ (skb->data + bmax * i), len,
+ DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
+ return -1;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ /* last descriptor can be set now */
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
+ STMMAC_CHAIN_MODE, 1, true, skb->len);
+ len = 0;
+ }
+ }
+
+ tx_q->cur_tx = entry;
+
+ return entry;
+}
+
+static unsigned int is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if ((enh_desc && (len > BUF_SIZE_8KiB)) ||
+ (!enh_desc && (len > BUF_SIZE_2KiB))) {
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static void init_dma_chain(void *des, dma_addr_t phy_addr,
+ unsigned int size, unsigned int extend_desc)
+{
+ /*
+ * In chained mode the des3 points to the next element in the ring.
+ * The latest element has to point to the head.
+ */
+ int i;
+ dma_addr_t dma_phy = phy_addr;
+
+ if (extend_desc) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_extended_desc);
+ p->basic.des3 = cpu_to_le32((unsigned int)dma_phy);
+ p++;
+ }
+ p->basic.des3 = cpu_to_le32((unsigned int)phy_addr);
+
+ } else {
+ struct dma_desc *p = (struct dma_desc *)des;
+ for (i = 0; i < (size - 1); i++) {
+ dma_phy += sizeof(struct dma_desc);
+ p->des3 = cpu_to_le32((unsigned int)dma_phy);
+ p++;
+ }
+ p->des3 = cpu_to_le32((unsigned int)phy_addr);
+ }
+}
+
+static void refill_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)priv_ptr;
+ struct stmmac_priv *priv = rx_q->priv_data;
+
+ if (priv->hwts_rx_en && !priv->extend_desc)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = cpu_to_le32((unsigned int)(rx_q->dma_rx_phy +
+ (((rx_q->dirty_rx) + 1) %
+ priv->dma_conf.dma_rx_size) *
+ sizeof(struct dma_desc)));
+}
+
+static void clean_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->dirty_tx;
+
+ if (tx_q->tx_skbuff_dma[entry].last_segment && !priv->extend_desc &&
+ priv->hwts_tx_en)
+ /* NOTE: Device will overwrite des3 with timestamp value if
+ * 1588-2002 time stamping is enabled, hence reinitialize it
+ * to keep explicit chaining in the descriptor.
+ */
+ p->des3 = cpu_to_le32((unsigned int)((tx_q->dma_tx_phy +
+ ((tx_q->dirty_tx + 1) %
+ priv->dma_conf.dma_tx_size))
+ * sizeof(struct dma_desc)));
+}
+
+const struct stmmac_mode_ops chain_mode_ops = {
+ .init = init_dma_chain,
+ .is_jumbo_frm = is_jumbo_frm,
+ .jumbo_frm = jumbo_frm,
+ .refill_desc3 = refill_desc3,
+ .clean_desc3 = clean_desc3,
+};
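
As init_dma_chain() above establishes, each descriptor's des3 field holds the DMA address of the next descriptor, with the last entry pointing back to the head. A standalone sketch of that invariant for basic descriptors (illustrative checker code, not part of the patch; "ring" and "ring_phys" are a hypothetical CPU-visible descriptor array and its DMA base address):

    #include "descs.h"	/* struct dma_desc */

    /* Verify the circular chain built by init_dma_chain():
     * entry i links to entry (i + 1) % size, so the wrap at
     * i == size - 1 brings the chain back to the head.
     */
    static void check_chain_layout(struct dma_desc *ring,
                                   dma_addr_t ring_phys, unsigned int size)
    {
            unsigned int i;

            for (i = 0; i < size; i++) {
                    dma_addr_t next = ring_phys +
                            ((i + 1) % size) * sizeof(struct dma_desc);

                    WARN_ON(le32_to_cpu(ring[i].des3) != (u32)next);
            }
    }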
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
new file mode 100644
index 000000000..54bb072ae
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -0,0 +1,568 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ STMMAC Common Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __COMMON_H__
+#define __COMMON_H__
+
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/pcs/pcs-xpcs.h>
+#include <linux/module.h>
+#if IS_ENABLED(CONFIG_VLAN_8021Q)
+#define STMMAC_VLAN_TAG_USED
+#include <linux/if_vlan.h>
+#endif
+
+#include "descs.h"
+#include "hwif.h"
+#include "mmc.h"
+
+/* Synopsys Core versions */
+#define DWMAC_CORE_3_40 0x34
+#define DWMAC_CORE_3_50 0x35
+#define DWMAC_CORE_4_00 0x40
+#define DWMAC_CORE_4_10 0x41
+#define DWMAC_CORE_5_00 0x50
+#define DWMAC_CORE_5_10 0x51
+#define DWMAC_CORE_5_20 0x52
+#define DWXGMAC_CORE_2_10 0x21
+#define DWXLGMAC_CORE_2_00 0x20
+
+/* Device ID */
+#define DWXGMAC_ID 0x76
+#define DWXLGMAC_ID 0x27
+
+#define STMMAC_CHAN0 0 /* Always supported and default for all chips */
+
+/* TX and RX descriptor ring lengths; these must be powers of two.
+ * A TX ring shorter than 64 entries may cause "transmit queue timed out"
+ * errors; an RX ring shorter than 64 entries may cause inconsistent Rx
+ * chain errors.
+ */
+#define DMA_MIN_TX_SIZE 64
+#define DMA_MAX_TX_SIZE 1024
+#define DMA_DEFAULT_TX_SIZE 512
+#define DMA_MIN_RX_SIZE 64
+#define DMA_MAX_RX_SIZE 1024
+#define DMA_DEFAULT_RX_SIZE 512
+#define STMMAC_GET_ENTRY(x, size) ((x + 1) & (size - 1))
+
+#undef FRAME_FILTER_DEBUG
+/* #define FRAME_FILTER_DEBUG */
+
+struct stmmac_txq_stats {
+ unsigned long tx_pkt_n;
+ unsigned long tx_normal_irq_n;
+};
+
+struct stmmac_rxq_stats {
+ unsigned long rx_pkt_n;
+ unsigned long rx_normal_irq_n;
+};
+
+/* Extra statistic and debug information exposed by ethtool */
+struct stmmac_extra_stats {
+ /* Transmit errors */
+ unsigned long tx_underflow ____cacheline_aligned;
+ unsigned long tx_carrier;
+ unsigned long tx_losscarrier;
+ unsigned long vlan_tag;
+ unsigned long tx_deferred;
+ unsigned long tx_vlan;
+ unsigned long tx_jabber;
+ unsigned long tx_frame_flushed;
+ unsigned long tx_payload_error;
+ unsigned long tx_ip_header_error;
+ /* Receive errors */
+ unsigned long rx_desc;
+ unsigned long sa_filter_fail;
+ unsigned long overflow_error;
+ unsigned long ipc_csum_error;
+ unsigned long rx_collision;
+ unsigned long rx_crc_errors;
+ unsigned long dribbling_bit;
+ unsigned long rx_length;
+ unsigned long rx_mii;
+ unsigned long rx_multicast;
+ unsigned long rx_gmac_overflow;
+ unsigned long rx_watchdog;
+ unsigned long da_rx_filter_fail;
+ unsigned long sa_rx_filter_fail;
+ unsigned long rx_missed_cntr;
+ unsigned long rx_overflow_cntr;
+ unsigned long rx_vlan;
+ unsigned long rx_split_hdr_pkt_n;
+ /* Tx/Rx IRQ error info */
+ unsigned long tx_undeflow_irq;
+ unsigned long tx_process_stopped_irq;
+ unsigned long tx_jabber_irq;
+ unsigned long rx_overflow_irq;
+ unsigned long rx_buf_unav_irq;
+ unsigned long rx_process_stopped_irq;
+ unsigned long rx_watchdog_irq;
+ unsigned long tx_early_irq;
+ unsigned long fatal_bus_error_irq;
+ /* Tx/Rx IRQ Events */
+ unsigned long rx_early_irq;
+ unsigned long threshold;
+ unsigned long tx_pkt_n;
+ unsigned long rx_pkt_n;
+ unsigned long normal_irq_n;
+ unsigned long rx_normal_irq_n;
+ unsigned long napi_poll;
+ unsigned long tx_normal_irq_n;
+ unsigned long tx_clean;
+ unsigned long tx_set_ic_bit;
+ unsigned long irq_receive_pmt_irq_n;
+ /* MMC info */
+ unsigned long mmc_tx_irq_n;
+ unsigned long mmc_rx_irq_n;
+ unsigned long mmc_rx_csum_offload_irq_n;
+ /* EEE */
+ unsigned long irq_tx_path_in_lpi_mode_n;
+ unsigned long irq_tx_path_exit_lpi_mode_n;
+ unsigned long irq_rx_path_in_lpi_mode_n;
+ unsigned long irq_rx_path_exit_lpi_mode_n;
+ unsigned long phy_eee_wakeup_error_n;
+ /* Extended RDES status */
+ unsigned long ip_hdr_err;
+ unsigned long ip_payload_err;
+ unsigned long ip_csum_bypassed;
+ unsigned long ipv4_pkt_rcvd;
+ unsigned long ipv6_pkt_rcvd;
+ unsigned long no_ptp_rx_msg_type_ext;
+ unsigned long ptp_rx_msg_type_sync;
+ unsigned long ptp_rx_msg_type_follow_up;
+ unsigned long ptp_rx_msg_type_delay_req;
+ unsigned long ptp_rx_msg_type_delay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_req;
+ unsigned long ptp_rx_msg_type_pdelay_resp;
+ unsigned long ptp_rx_msg_type_pdelay_follow_up;
+ unsigned long ptp_rx_msg_type_announce;
+ unsigned long ptp_rx_msg_type_management;
+ unsigned long ptp_rx_msg_pkt_reserved_type;
+ unsigned long ptp_frame_type;
+ unsigned long ptp_ver;
+ unsigned long timestamp_dropped;
+ unsigned long av_pkt_rcvd;
+ unsigned long av_tagged_pkt_rcvd;
+ unsigned long vlan_tag_priority_val;
+ unsigned long l3_filter_match;
+ unsigned long l4_filter_match;
+ unsigned long l3_l4_filter_no_match;
+ /* PCS */
+ unsigned long irq_pcs_ane_n;
+ unsigned long irq_pcs_link_n;
+ unsigned long irq_rgmii_n;
+ unsigned long pcs_link;
+ unsigned long pcs_duplex;
+ unsigned long pcs_speed;
+ /* debug register */
+ unsigned long mtl_tx_status_fifo_full;
+ unsigned long mtl_tx_fifo_not_empty;
+ unsigned long mmtl_fifo_ctrl;
+ unsigned long mtl_tx_fifo_read_ctrl_write;
+ unsigned long mtl_tx_fifo_read_ctrl_wait;
+ unsigned long mtl_tx_fifo_read_ctrl_read;
+ unsigned long mtl_tx_fifo_read_ctrl_idle;
+ unsigned long mac_tx_in_pause;
+ unsigned long mac_tx_frame_ctrl_xfer;
+ unsigned long mac_tx_frame_ctrl_idle;
+ unsigned long mac_tx_frame_ctrl_wait;
+ unsigned long mac_tx_frame_ctrl_pause;
+ unsigned long mac_gmii_tx_proto_engine;
+ unsigned long mtl_rx_fifo_fill_level_full;
+ unsigned long mtl_rx_fifo_fill_above_thresh;
+ unsigned long mtl_rx_fifo_fill_below_thresh;
+ unsigned long mtl_rx_fifo_fill_level_empty;
+ unsigned long mtl_rx_fifo_read_ctrl_flush;
+ unsigned long mtl_rx_fifo_read_ctrl_read_data;
+ unsigned long mtl_rx_fifo_read_ctrl_status;
+ unsigned long mtl_rx_fifo_read_ctrl_idle;
+ unsigned long mtl_rx_fifo_ctrl_active;
+ unsigned long mac_rx_frame_ctrl_fifo;
+ unsigned long mac_gmii_rx_proto_engine;
+ /* TSO */
+ unsigned long tx_tso_frames;
+ unsigned long tx_tso_nfrags;
+ /* EST */
+ unsigned long mtl_est_cgce;
+ unsigned long mtl_est_hlbs;
+ unsigned long mtl_est_hlbf;
+ unsigned long mtl_est_btre;
+ unsigned long mtl_est_btrlm;
+ /* per queue statistics */
+ struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES];
+ struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES];
+};
+
+/* Safety Feature statistics exposed by ethtool */
+struct stmmac_safety_stats {
+ unsigned long mac_errors[32];
+ unsigned long mtl_errors[32];
+ unsigned long dma_errors[32];
+};
+
+/* Number of fields in Safety Stats */
+#define STMMAC_SAFETY_FEAT_SIZE \
+ (sizeof(struct stmmac_safety_stats) / sizeof(unsigned long))
+
+/* CSR Frequency Access Defines*/
+#define CSR_F_35M 35000000
+#define CSR_F_60M 60000000
+#define CSR_F_100M 100000000
+#define CSR_F_150M 150000000
+#define CSR_F_250M 250000000
+#define CSR_F_300M 300000000
+
+#define MAC_CSR_H_FRQ_MASK 0x20
+
+#define HASH_TABLE_SIZE 64
+#define PAUSE_TIME 0xffff
+
+/* Flow Control defines */
+#define FLOW_OFF 0
+#define FLOW_RX 1
+#define FLOW_TX 2
+#define FLOW_AUTO (FLOW_TX | FLOW_RX)
+
+/* PCS defines */
+#define STMMAC_PCS_RGMII (1 << 0)
+#define STMMAC_PCS_SGMII (1 << 1)
+#define STMMAC_PCS_TBI (1 << 2)
+#define STMMAC_PCS_RTBI (1 << 3)
+
+#define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */
+
+/* DMA HW feature register fields */
+#define DMA_HW_FEAT_MIISEL 0x00000001 /* 10/100 Mbps Support */
+#define DMA_HW_FEAT_GMIISEL 0x00000002 /* 1000 Mbps Support */
+#define DMA_HW_FEAT_HDSEL 0x00000004 /* Half-Duplex Support */
+#define DMA_HW_FEAT_EXTHASHEN 0x00000008 /* Expanded DA Hash Filter */
+#define DMA_HW_FEAT_HASHSEL 0x00000010 /* HASH Filter */
+#define DMA_HW_FEAT_ADDMAC 0x00000020 /* Multiple MAC Addr Reg */
+#define DMA_HW_FEAT_PCSSEL 0x00000040 /* PCS registers */
+#define DMA_HW_FEAT_L3L4FLTREN 0x00000080 /* Layer 3 & Layer 4 Feature */
+#define DMA_HW_FEAT_SMASEL 0x00000100 /* SMA(MDIO) Interface */
+#define DMA_HW_FEAT_RWKSEL 0x00000200 /* PMT Remote Wakeup */
+#define DMA_HW_FEAT_MGKSEL 0x00000400 /* PMT Magic Packet */
+#define DMA_HW_FEAT_MMCSEL 0x00000800 /* RMON Module */
+#define DMA_HW_FEAT_TSVER1SEL 0x00001000 /* Only IEEE 1588-2002 */
+#define DMA_HW_FEAT_TSVER2SEL 0x00002000 /* IEEE 1588-2008 PTPv2 */
+#define DMA_HW_FEAT_EEESEL 0x00004000 /* Energy Efficient Ethernet */
+#define DMA_HW_FEAT_AVSEL 0x00008000 /* AV Feature */
+#define DMA_HW_FEAT_TXCOESEL 0x00010000 /* Checksum Offload in Tx */
+#define DMA_HW_FEAT_RXTYP1COE 0x00020000 /* IP COE (Type 1) in Rx */
+#define DMA_HW_FEAT_RXTYP2COE 0x00040000 /* IP COE (Type 2) in Rx */
+#define DMA_HW_FEAT_RXFIFOSIZE 0x00080000 /* Rx FIFO > 2048 Bytes */
+#define DMA_HW_FEAT_RXCHCNT 0x00300000 /* No. additional Rx Channels */
+#define DMA_HW_FEAT_TXCHCNT 0x00c00000 /* No. additional Tx Channels */
+#define DMA_HW_FEAT_ENHDESSEL 0x01000000 /* Alternate Descriptor */
+/* Timestamping with Internal System Time */
+#define DMA_HW_FEAT_INTTSEN 0x02000000
+#define DMA_HW_FEAT_FLEXIPPSEN 0x04000000 /* Flexible PPS Output */
+#define DMA_HW_FEAT_SAVLANINS 0x08000000 /* Source Addr or VLAN */
+#define DMA_HW_FEAT_ACTPHYIF 0x70000000 /* Active/selected PHY iface */
+#define DEFAULT_DMA_PBL 8
+
+/* MSI defines */
+#define STMMAC_MSI_VEC_MAX 32
+
+/* PCS status and mask defines */
+#define PCS_ANE_IRQ BIT(2) /* PCS Auto-Negotiation */
+#define PCS_LINK_IRQ BIT(1) /* PCS Link */
+#define PCS_RGSMIIIS_IRQ BIT(0) /* RGMII or SMII Interrupt */
+
+/* Max/Min RI Watchdog Timer count value */
+#define MAX_DMA_RIWT 0xff
+#define MIN_DMA_RIWT 0x10
+#define DEF_DMA_RIWT 0xa0
+/* Tx coalesce parameters */
+#define STMMAC_COAL_TX_TIMER 1000
+#define STMMAC_MAX_COAL_TX_TICK 100000
+#define STMMAC_TX_MAX_FRAMES 256
+#define STMMAC_TX_FRAMES 25
+#define STMMAC_RX_FRAMES 0
+
+/* Packets types */
+enum packets_types {
+ PACKET_AVCPQ = 0x1, /* AV Untagged Control packets */
+ PACKET_PTPQ = 0x2, /* PTP Packets */
+ PACKET_DCBCPQ = 0x3, /* DCB Control Packets */
+ PACKET_UPQ = 0x4, /* Untagged Packets */
+ PACKET_MCBCQ = 0x5, /* Multicast & Broadcast Packets */
+};
+
+/* Rx IPC status */
+enum rx_frame_status {
+ good_frame = 0x0,
+ discard_frame = 0x1,
+ csum_none = 0x2,
+ llc_snap = 0x4,
+ dma_own = 0x8,
+ rx_not_ls = 0x10,
+};
+
+/* Tx status */
+enum tx_frame_status {
+ tx_done = 0x0,
+ tx_not_ls = 0x1,
+ tx_err = 0x2,
+ tx_dma_own = 0x4,
+ tx_err_bump_tc = 0x8,
+};
+
+enum dma_irq_status {
+ tx_hard_error = 0x1,
+ tx_hard_error_bump_tc = 0x2,
+ handle_rx = 0x4,
+ handle_tx = 0x8,
+};
+
+enum dma_irq_dir {
+ DMA_DIR_RX = 0x1,
+ DMA_DIR_TX = 0x2,
+ DMA_DIR_RXTX = 0x3,
+};
+
+enum request_irq_err {
+ REQ_IRQ_ERR_ALL,
+ REQ_IRQ_ERR_TX,
+ REQ_IRQ_ERR_RX,
+ REQ_IRQ_ERR_SFTY_UE,
+ REQ_IRQ_ERR_SFTY_CE,
+ REQ_IRQ_ERR_LPI,
+ REQ_IRQ_ERR_WOL,
+ REQ_IRQ_ERR_MAC,
+ REQ_IRQ_ERR_NO,
+};
+
+/* EEE and LPI defines */
+#define CORE_IRQ_TX_PATH_IN_LPI_MODE (1 << 0)
+#define CORE_IRQ_TX_PATH_EXIT_LPI_MODE (1 << 1)
+#define CORE_IRQ_RX_PATH_IN_LPI_MODE (1 << 2)
+#define CORE_IRQ_RX_PATH_EXIT_LPI_MODE (1 << 3)
+
+/* FPE defines */
+#define FPE_EVENT_UNKNOWN 0
+#define FPE_EVENT_TRSP BIT(0)
+#define FPE_EVENT_TVER BIT(1)
+#define FPE_EVENT_RRSP BIT(2)
+#define FPE_EVENT_RVER BIT(3)
+
+#define CORE_IRQ_MTL_RX_OVERFLOW BIT(8)
+
+/* Physical Coding Sublayer */
+struct rgmii_adv {
+ unsigned int pause;
+ unsigned int duplex;
+ unsigned int lp_pause;
+ unsigned int lp_duplex;
+};
+
+#define STMMAC_PCS_PAUSE 1
+#define STMMAC_PCS_ASYM_PAUSE 2
+
+/* DMA HW capabilities */
+struct dma_features {
+ unsigned int mbps_10_100;
+ unsigned int mbps_1000;
+ unsigned int half_duplex;
+ unsigned int hash_filter;
+ unsigned int multi_addr;
+ unsigned int pcs;
+ unsigned int sma_mdio;
+ unsigned int pmt_remote_wake_up;
+ unsigned int pmt_magic_frame;
+ unsigned int rmon;
+ /* IEEE 1588-2002 */
+ unsigned int time_stamp;
+ /* IEEE 1588-2008 */
+ unsigned int atime_stamp;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ unsigned int eee;
+ unsigned int av;
+ unsigned int hash_tb_sz;
+ unsigned int tsoen;
+ /* TX and RX csum */
+ unsigned int tx_coe;
+ unsigned int rx_coe;
+ unsigned int rx_coe_type1;
+ unsigned int rx_coe_type2;
+ unsigned int rxfifo_over_2048;
+ /* TX and RX number of channels */
+ unsigned int number_rx_channel;
+ unsigned int number_tx_channel;
+ /* TX and RX number of queues */
+ unsigned int number_rx_queues;
+ unsigned int number_tx_queues;
+ /* PPS output */
+ unsigned int pps_out_num;
+ /* Alternate (enhanced) DESC mode */
+ unsigned int enh_desc;
+ /* TX and RX FIFO sizes */
+ unsigned int tx_fifo_size;
+ unsigned int rx_fifo_size;
+ /* Automotive Safety Package */
+ unsigned int asp;
+ /* RX Parser */
+ unsigned int frpsel;
+ unsigned int frpbs;
+ unsigned int frpes;
+ unsigned int addr64;
+ unsigned int host_dma_width;
+ unsigned int rssen;
+ unsigned int vlhash;
+ unsigned int sphen;
+ unsigned int vlins;
+ unsigned int dvlan;
+ unsigned int l3l4fnum;
+ unsigned int arpoffsel;
+ /* TSN Features */
+ unsigned int estwid;
+ unsigned int estdep;
+ unsigned int estsel;
+ unsigned int fpesel;
+ unsigned int tbssel;
+ /* Numbers of Auxiliary Snapshot Inputs */
+ unsigned int aux_snapshot_n;
+};
+
+/* RX Buffer size must be a multiple of 4/8/16 bytes */
+#define BUF_SIZE_16KiB 16368
+#define BUF_SIZE_8KiB 8188
+#define BUF_SIZE_4KiB 4096
+#define BUF_SIZE_2KiB 2048
+
+/* Power Down and WOL */
+#define PMT_NOT_SUPPORTED 0
+#define PMT_SUPPORTED 1
+
+/* Common MAC defines */
+#define MAC_CTRL_REG 0x00000000 /* MAC Control */
+#define MAC_ENABLE_TX 0x00000008 /* Transmitter Enable */
+#define MAC_ENABLE_RX 0x00000004 /* Receiver Enable */
+
+/* Default LPI timers */
+#define STMMAC_DEFAULT_LIT_LS 0x3E8
+#define STMMAC_DEFAULT_TWT_LS 0x1E
+#define STMMAC_ET_MAX 0xFFFFF
+
+#define STMMAC_CHAIN_MODE 0x1
+#define STMMAC_RING_MODE 0x2
+
+#define JUMBO_LEN 9000
+
+/* Receive Side Scaling */
+#define STMMAC_RSS_HASH_KEY_SIZE 40
+#define STMMAC_RSS_MAX_TABLE_SIZE 256
+
+/* VLAN */
+#define STMMAC_VLAN_NONE 0x0
+#define STMMAC_VLAN_REMOVE 0x1
+#define STMMAC_VLAN_INSERT 0x2
+#define STMMAC_VLAN_REPLACE 0x3
+
+extern const struct stmmac_desc_ops enh_desc_ops;
+extern const struct stmmac_desc_ops ndesc_ops;
+
+struct mac_device_info;
+
+extern const struct stmmac_hwtimestamp stmmac_ptp;
+extern const struct stmmac_mode_ops dwmac4_ring_mode_ops;
+
+struct mac_link {
+ u32 speed_mask;
+ u32 speed10;
+ u32 speed100;
+ u32 speed1000;
+ u32 speed2500;
+ u32 duplex;
+ struct {
+ u32 speed2500;
+ u32 speed5000;
+ u32 speed10000;
+ } xgmii;
+ struct {
+ u32 speed25000;
+ u32 speed40000;
+ u32 speed50000;
+ u32 speed100000;
+ } xlgmii;
+};
+
+struct mii_regs {
+ unsigned int addr; /* MII Address */
+ unsigned int data; /* MII Data */
+ unsigned int addr_shift; /* MII address shift */
+ unsigned int reg_shift; /* MII reg shift */
+ unsigned int addr_mask; /* MII address mask */
+ unsigned int reg_mask; /* MII reg mask */
+ unsigned int clk_csr_shift;
+ unsigned int clk_csr_mask;
+};
+
+struct mac_device_info {
+ const struct stmmac_ops *mac;
+ const struct stmmac_desc_ops *desc;
+ const struct stmmac_dma_ops *dma;
+ const struct stmmac_mode_ops *mode;
+ const struct stmmac_hwtimestamp *ptp;
+ const struct stmmac_tc_ops *tc;
+ const struct stmmac_mmc_ops *mmc;
+ struct dw_xpcs *xpcs;
+ struct mii_regs mii; /* MII register Addresses */
+ struct mac_link link;
+ void __iomem *pcsr; /* pointer to device CSRs */
+ unsigned int multicast_filter_bins;
+ unsigned int unicast_filter_entries;
+ unsigned int mcast_bits_log2;
+ unsigned int rx_csum;
+ unsigned int pcs;
+ unsigned int pmt;
+ unsigned int ps;
+ unsigned int xlgmac;
+ unsigned int num_vlan;
+ u32 vlan_filter[32];
+ bool vlan_fail_q_en;
+ u8 vlan_fail_q;
+};
+
+struct stmmac_rx_routing {
+ u32 reg_mask;
+ u32 reg_shift;
+};
+
+int dwmac100_setup(struct stmmac_priv *priv);
+int dwmac1000_setup(struct stmmac_priv *priv);
+int dwmac4_setup(struct stmmac_priv *priv);
+int dwxgmac2_setup(struct stmmac_priv *priv);
+int dwxlgmac2_setup(struct stmmac_priv *priv);
+
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+void stmmac_set_mac(void __iomem *ioaddr, bool enable);
+
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low);
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable);
+
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
+
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
+extern const struct stmmac_desc_ops dwmac4_desc_ops;
+
+#endif /* __COMMON_H__ */
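
The STMMAC_GET_ENTRY() macro defined in this header relies on the power-of-two ring sizes noted above: masking with (size - 1) is equivalent to a modulo, so the ring index wraps without a division. A minimal sketch, assuming the default ring size ("next_tx_entry" is an illustrative helper, not part of the patch):

    #include "common.h"	/* STMMAC_GET_ENTRY(), DMA_DEFAULT_TX_SIZE */

    /* Advance a TX ring index through the wrap point. With the default
     * 512-entry ring, (511 + 1) & 511 == 0, i.e. back to slot 0.
     */
    static unsigned int next_tx_entry(unsigned int entry)
    {
            return STMMAC_GET_ENTRY(entry, DMA_DEFAULT_TX_SIZE);
    }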
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs.h b/drivers/net/ethernet/stmicro/stmmac/descs.h
new file mode 100644
index 000000000..49d6a8662
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/descs.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ Header File to describe the DMA descriptors and related definitions.
+ This is for DWMAC100 and 1000 cores.
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DESCS_H__
+#define __DESCS_H__
+
+#include <linux/bitops.h>
+
+/* Normal receive descriptor defines */
+
+/* RDES0 */
+#define RDES0_PAYLOAD_CSUM_ERR BIT(0)
+#define RDES0_CRC_ERROR BIT(1)
+#define RDES0_DRIBBLING BIT(2)
+#define RDES0_MII_ERROR BIT(3)
+#define RDES0_RECEIVE_WATCHDOG BIT(4)
+#define RDES0_FRAME_TYPE BIT(5)
+#define RDES0_COLLISION BIT(6)
+#define RDES0_IPC_CSUM_ERROR BIT(7)
+#define RDES0_LAST_DESCRIPTOR BIT(8)
+#define RDES0_FIRST_DESCRIPTOR BIT(9)
+#define RDES0_VLAN_TAG BIT(10)
+#define RDES0_OVERFLOW_ERROR BIT(11)
+#define RDES0_LENGTH_ERROR BIT(12)
+#define RDES0_SA_FILTER_FAIL BIT(13)
+#define RDES0_DESCRIPTOR_ERROR BIT(14)
+#define RDES0_ERROR_SUMMARY BIT(15)
+#define RDES0_FRAME_LEN_MASK GENMASK(29, 16)
+#define RDES0_FRAME_LEN_SHIFT 16
+#define RDES0_DA_FILTER_FAIL BIT(30)
+#define RDES0_OWN BIT(31)
+/* RDES1 */
+#define RDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
+#define RDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
+#define RDES1_BUFFER2_SIZE_SHIFT 11
+#define RDES1_SECOND_ADDRESS_CHAINED BIT(24)
+#define RDES1_END_RING BIT(25)
+#define RDES1_DISABLE_IC BIT(31)
+
+/* Enhanced receive descriptor defines */
+
+/* RDES0 (similar to normal RDES) */
+#define ERDES0_RX_MAC_ADDR BIT(0)
+
+/* RDES1: completely differ from normal desc definitions */
+#define ERDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
+#define ERDES1_SECOND_ADDRESS_CHAINED BIT(14)
+#define ERDES1_END_RING BIT(15)
+#define ERDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
+#define ERDES1_BUFFER2_SIZE_SHIFT 16
+#define ERDES1_DISABLE_IC BIT(31)
+
+/* Normal transmit descriptor defines */
+/* TDES0 */
+#define TDES0_DEFERRED BIT(0)
+#define TDES0_UNDERFLOW_ERROR BIT(1)
+#define TDES0_EXCESSIVE_DEFERRAL BIT(2)
+#define TDES0_COLLISION_COUNT_MASK GENMASK(6, 3)
+#define TDES0_VLAN_FRAME BIT(7)
+#define TDES0_EXCESSIVE_COLLISIONS BIT(8)
+#define TDES0_LATE_COLLISION BIT(9)
+#define TDES0_NO_CARRIER BIT(10)
+#define TDES0_LOSS_CARRIER BIT(11)
+#define TDES0_PAYLOAD_ERROR BIT(12)
+#define TDES0_FRAME_FLUSHED BIT(13)
+#define TDES0_JABBER_TIMEOUT BIT(14)
+#define TDES0_ERROR_SUMMARY BIT(15)
+#define TDES0_IP_HEADER_ERROR BIT(16)
+#define TDES0_TIME_STAMP_STATUS BIT(17)
+#define TDES0_OWN ((u32)BIT(31)) /* silence sparse */
+/* TDES1 */
+#define TDES1_BUFFER1_SIZE_MASK GENMASK(10, 0)
+#define TDES1_BUFFER2_SIZE_MASK GENMASK(21, 11)
+#define TDES1_BUFFER2_SIZE_SHIFT 11
+#define TDES1_TIME_STAMP_ENABLE BIT(22)
+#define TDES1_DISABLE_PADDING BIT(23)
+#define TDES1_SECOND_ADDRESS_CHAINED BIT(24)
+#define TDES1_END_RING BIT(25)
+#define TDES1_CRC_DISABLE BIT(26)
+#define TDES1_CHECKSUM_INSERTION_MASK GENMASK(28, 27)
+#define TDES1_CHECKSUM_INSERTION_SHIFT 27
+#define TDES1_FIRST_SEGMENT BIT(29)
+#define TDES1_LAST_SEGMENT BIT(30)
+#define TDES1_INTERRUPT BIT(31)
+
+/* Enhanced transmit descriptor defines */
+/* TDES0 */
+#define ETDES0_DEFERRED BIT(0)
+#define ETDES0_UNDERFLOW_ERROR BIT(1)
+#define ETDES0_EXCESSIVE_DEFERRAL BIT(2)
+#define ETDES0_COLLISION_COUNT_MASK GENMASK(6, 3)
+#define ETDES0_VLAN_FRAME BIT(7)
+#define ETDES0_EXCESSIVE_COLLISIONS BIT(8)
+#define ETDES0_LATE_COLLISION BIT(9)
+#define ETDES0_NO_CARRIER BIT(10)
+#define ETDES0_LOSS_CARRIER BIT(11)
+#define ETDES0_PAYLOAD_ERROR BIT(12)
+#define ETDES0_FRAME_FLUSHED BIT(13)
+#define ETDES0_JABBER_TIMEOUT BIT(14)
+#define ETDES0_ERROR_SUMMARY BIT(15)
+#define ETDES0_IP_HEADER_ERROR BIT(16)
+#define ETDES0_TIME_STAMP_STATUS BIT(17)
+#define ETDES0_SECOND_ADDRESS_CHAINED BIT(20)
+#define ETDES0_END_RING BIT(21)
+#define ETDES0_CHECKSUM_INSERTION_MASK GENMASK(23, 22)
+#define ETDES0_CHECKSUM_INSERTION_SHIFT 22
+#define ETDES0_TIME_STAMP_ENABLE BIT(25)
+#define ETDES0_DISABLE_PADDING BIT(26)
+#define ETDES0_CRC_DISABLE BIT(27)
+#define ETDES0_FIRST_SEGMENT BIT(28)
+#define ETDES0_LAST_SEGMENT BIT(29)
+#define ETDES0_INTERRUPT BIT(30)
+#define ETDES0_OWN ((u32)BIT(31)) /* silence sparse */
+/* TDES1 */
+#define ETDES1_BUFFER1_SIZE_MASK GENMASK(12, 0)
+#define ETDES1_BUFFER2_SIZE_MASK GENMASK(28, 16)
+#define ETDES1_BUFFER2_SIZE_SHIFT 16
+
+/* Extended Receive descriptor definitions */
+#define ERDES4_IP_PAYLOAD_TYPE_MASK GENMASK(6, 2)
+#define ERDES4_IP_HDR_ERR BIT(3)
+#define ERDES4_IP_PAYLOAD_ERR BIT(4)
+#define ERDES4_IP_CSUM_BYPASSED BIT(5)
+#define ERDES4_IPV4_PKT_RCVD BIT(6)
+#define ERDES4_IPV6_PKT_RCVD BIT(7)
+#define ERDES4_MSG_TYPE_MASK GENMASK(11, 8)
+#define ERDES4_PTP_FRAME_TYPE BIT(12)
+#define ERDES4_PTP_VER BIT(13)
+#define ERDES4_TIMESTAMP_DROPPED BIT(14)
+#define ERDES4_AV_PKT_RCVD BIT(16)
+#define ERDES4_AV_TAGGED_PKT_RCVD BIT(17)
+#define ERDES4_VLAN_TAG_PRI_VAL_MASK GENMASK(20, 18)
+#define ERDES4_L3_FILTER_MATCH BIT(24)
+#define ERDES4_L4_FILTER_MATCH BIT(25)
+#define ERDES4_L3_L4_FILT_NO_MATCH_MASK GENMASK(27, 26)
+
+/* Extended RDES4 message type definitions */
+#define RDES_EXT_NO_PTP 0x0
+#define RDES_EXT_SYNC 0x1
+#define RDES_EXT_FOLLOW_UP 0x2
+#define RDES_EXT_DELAY_REQ 0x3
+#define RDES_EXT_DELAY_RESP 0x4
+#define RDES_EXT_PDELAY_REQ 0x5
+#define RDES_EXT_PDELAY_RESP 0x6
+#define RDES_EXT_PDELAY_FOLLOW_UP 0x7
+#define RDES_PTP_ANNOUNCE 0x8
+#define RDES_PTP_MANAGEMENT 0x9
+#define RDES_PTP_SIGNALING 0xa
+#define RDES_PTP_PKT_RESERVED_TYPE 0xf
+
+/* Basic descriptor structure for normal and alternate descriptors */
+struct dma_desc {
+ __le32 des0;
+ __le32 des1;
+ __le32 des2;
+ __le32 des3;
+};
+
+/* Extended descriptor structure (e.g. >= databook 3.50a) */
+struct dma_extended_desc {
+ struct dma_desc basic; /* Basic descriptors */
+ __le32 des4; /* Extended Status */
+ __le32 des5; /* Reserved */
+ __le32 des6; /* Tx/Rx Timestamp Low */
+ __le32 des7; /* Tx/Rx Timestamp High */
+};
+
+/* Enhanced descriptor for TBS */
+struct dma_edesc {
+ __le32 des4;
+ __le32 des5;
+ __le32 des6;
+ __le32 des7;
+ struct dma_desc basic;
+};
+
+/* Transmit checksum insertion control */
+#define TX_CIC_FULL 3 /* Include IP header and pseudoheader */
+
+#endif /* __DESCS_H__ */
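
As an example of how these descriptor bitfields are consumed (a sketch, not code from this patch; "example_rx_frame_len" is a hypothetical helper), the received frame length can be carved out of RDES0 with the mask defined above:

    #include <linux/bitfield.h>
    #include "descs.h"

    /* Extract the frame length field from a normal RX descriptor;
     * equivalent to (rdes0 & RDES0_FRAME_LEN_MASK) >> RDES0_FRAME_LEN_SHIFT.
     */
    static unsigned int example_rx_frame_len(const struct dma_desc *p)
    {
            u32 rdes0 = le32_to_cpu(p->des0);

            return FIELD_GET(RDES0_FRAME_LEN_MASK, rdes0);
    }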
diff --git a/drivers/net/ethernet/stmicro/stmmac/descs_com.h b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
new file mode 100644
index 000000000..40f7f2da9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/descs_com.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ Header File to describe Normal/enhanced descriptor functions used for RING
+ and CHAINED modes.
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors when the DMA is configured to work in chained or
+ ring mode.
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DESC_COM_H__
+#define __DESC_COM_H__
+
+/* Specific functions used for Ring mode */
+
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_ring(struct dma_desc *p, int end,
+ int bfsize)
+{
+ if (bfsize == BUF_SIZE_16KiB)
+ p->des1 |= cpu_to_le32((BUF_SIZE_8KiB
+ << ERDES1_BUFFER2_SIZE_SHIFT)
+ & ERDES1_BUFFER2_SIZE_MASK);
+
+ if (end)
+ p->des1 |= cpu_to_le32(ERDES1_END_RING);
+}
+
+static inline void enh_desc_end_tx_desc_on_ring(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des0 |= cpu_to_le32(ETDES0_END_RING);
+ else
+ p->des0 &= cpu_to_le32(~ETDES0_END_RING);
+}
+
+static inline void enh_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_4KiB)) {
+ p->des1 |= cpu_to_le32((((len - BUF_SIZE_4KiB)
+ << ETDES1_BUFFER2_SIZE_SHIFT)
+ & ETDES1_BUFFER2_SIZE_MASK) | (BUF_SIZE_4KiB
+ & ETDES1_BUFFER1_SIZE_MASK));
+ } else
+ p->des1 |= cpu_to_le32((len & ETDES1_BUFFER1_SIZE_MASK));
+}
+
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_ring(struct dma_desc *p, int end, int bfsize)
+{
+ if (bfsize >= BUF_SIZE_2KiB) {
+ int bfsize2;
+
+ bfsize2 = min(bfsize - BUF_SIZE_2KiB + 1, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32((bfsize2 << RDES1_BUFFER2_SIZE_SHIFT)
+ & RDES1_BUFFER2_SIZE_MASK);
+ }
+
+ if (end)
+ p->des1 |= cpu_to_le32(RDES1_END_RING);
+}
+
+static inline void ndesc_end_tx_desc_on_ring(struct dma_desc *p, int end)
+{
+ if (end)
+ p->des1 |= cpu_to_le32(TDES1_END_RING);
+ else
+ p->des1 &= cpu_to_le32(~TDES1_END_RING);
+}
+
+static inline void norm_set_tx_desc_len_on_ring(struct dma_desc *p, int len)
+{
+ if (unlikely(len > BUF_SIZE_2KiB)) {
+ unsigned int buffer1 = (BUF_SIZE_2KiB - 1)
+ & TDES1_BUFFER1_SIZE_MASK;
+ p->des1 |= cpu_to_le32((((len - buffer1)
+ << TDES1_BUFFER2_SIZE_SHIFT)
+ & TDES1_BUFFER2_SIZE_MASK) | buffer1);
+ } else {
+ p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
+ }
+}
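+
+/* Worked example (illustrative): with len == 3000, buffer 1 above is
+ * capped at BUF_SIZE_2KiB - 1 = 2047 bytes (note the minus one, unlike
+ * the enhanced variant) and buffer 2 takes the remaining 953 bytes.
+ */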
+
+/* Specific functions used for Chain mode */
+
+/* Enhanced descriptors */
+static inline void ehn_desc_rx_set_on_chain(struct dma_desc *p)
+{
+ p->des1 |= cpu_to_le32(ERDES1_SECOND_ADDRESS_CHAINED);
+}
+
+static inline void enh_desc_end_tx_desc_on_chain(struct dma_desc *p)
+{
+ p->des0 |= cpu_to_le32(ETDES0_SECOND_ADDRESS_CHAINED);
+}
+
+static inline void enh_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
+{
+ p->des1 |= cpu_to_le32(len & ETDES1_BUFFER1_SIZE_MASK);
+}
+
+/* Normal descriptors */
+static inline void ndesc_rx_set_on_chain(struct dma_desc *p, int end)
+{
+ p->des1 |= cpu_to_le32(RDES1_SECOND_ADDRESS_CHAINED);
+}
+
+static inline void ndesc_tx_set_on_chain(struct dma_desc *p)
+{
+ p->des1 |= cpu_to_le32(TDES1_SECOND_ADDRESS_CHAINED);
+}
+
+static inline void norm_set_tx_desc_len_on_chain(struct dma_desc *p, int len)
+{
+ p->des1 |= cpu_to_le32(len & TDES1_BUFFER1_SIZE_MASK);
+}
+#endif /* __DESC_COM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
new file mode 100644
index 000000000..dfbaea06d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-anarion.c
@@ -0,0 +1,155 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Adaptrum Anarion DWMAC glue layer
+ *
+ * Copyright (C) 2017, Adaptrum, Inc.
+ * (Written by Alexandru Gagniuc <alex.g at adaptrum.com> for Adaptrum, Inc.)
+ */
+
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define GMAC_RESET_CONTROL_REG 0
+#define GMAC_SW_CONFIG_REG 4
+#define GMAC_CONFIG_INTF_SEL_MASK (0x7 << 0)
+#define GMAC_CONFIG_INTF_RGMII (0x1 << 0)
+
+struct anarion_gmac {
+ uintptr_t ctl_block;
+ uint32_t phy_intf_sel;
+};
+
+static uint32_t gmac_read_reg(struct anarion_gmac *gmac, uint8_t reg)
+{
+ return readl((void *)(gmac->ctl_block + reg));
+}
+
+static void gmac_write_reg(struct anarion_gmac *gmac, uint8_t reg, uint32_t val)
+{
+ writel(val, (void *)(gmac->ctl_block + reg));
+}
+
+static int anarion_gmac_init(struct platform_device *pdev, void *priv)
+{
+ uint32_t sw_config;
+ struct anarion_gmac *gmac = priv;
+
+ /* Reset logic, configure interface mode, then release reset. SIMPLE! */
+ gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
+
+ sw_config = gmac_read_reg(gmac, GMAC_SW_CONFIG_REG);
+ sw_config &= ~GMAC_CONFIG_INTF_SEL_MASK;
+ sw_config |= (gmac->phy_intf_sel & GMAC_CONFIG_INTF_SEL_MASK);
+ gmac_write_reg(gmac, GMAC_SW_CONFIG_REG, sw_config);
+
+ gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 0);
+
+ return 0;
+}
+
+static void anarion_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct anarion_gmac *gmac = priv;
+
+ gmac_write_reg(gmac, GMAC_RESET_CONTROL_REG, 1);
+}
+
+static struct anarion_gmac *anarion_config_dt(struct platform_device *pdev)
+{
+ struct anarion_gmac *gmac;
+ phy_interface_t phy_mode;
+ void __iomem *ctl_block;
+ int err;
+
+ ctl_block = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(ctl_block)) {
+ dev_err(&pdev->dev, "Cannot get reset region (%ld)!\n",
+ PTR_ERR(ctl_block));
+ return ctl_block;
+ }
+
+ gmac = devm_kzalloc(&pdev->dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->ctl_block = (uintptr_t)ctl_block;
+
+ err = of_get_phy_mode(pdev->dev.of_node, &phy_mode);
+ if (err)
+ return ERR_PTR(err);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ fallthrough;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ gmac->phy_intf_sel = GMAC_CONFIG_INTF_RGMII;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n",
+ phy_mode);
+ return ERR_PTR(-ENOTSUPP);
+ }
+
+ return gmac;
+}
+
+static int anarion_dwmac_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct anarion_gmac *gmac;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ gmac = anarion_config_dt(pdev);
+ if (IS_ERR(gmac))
+ return PTR_ERR(gmac);
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->init = anarion_gmac_init;
+ plat_dat->exit = anarion_gmac_exit;
+ anarion_gmac_init(pdev, gmac);
+ plat_dat->bsp_priv = gmac;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id anarion_dwmac_match[] = {
+ { .compatible = "adaptrum,anarion-gmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, anarion_dwmac_match);
+
+static struct platform_driver anarion_dwmac_driver = {
+ .probe = anarion_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "anarion-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = anarion_dwmac_match,
+ },
+};
+module_platform_driver(anarion_dwmac_driver);
+
+MODULE_DESCRIPTION("Adaptrum Anarion DWMAC specific glue layer");
+MODULE_AUTHOR("Alexandru Gagniuc <mr.nuke.me@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
new file mode 100644
index 000000000..80efdeeb0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-dwc-qos-eth.c
@@ -0,0 +1,513 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
+ *
+ * Copyright (C) 2016 Joao Pinto <jpinto@synopsys.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+struct tegra_eqos {
+ struct device *dev;
+ void __iomem *regs;
+
+ struct reset_control *rst;
+ struct clk *clk_master;
+ struct clk *clk_slave;
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+
+ struct gpio_desc *reset;
+};
+
+static int dwc_eth_dwmac_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
+{
+ struct device *dev = &pdev->dev;
+ u32 burst_map = 0;
+ u32 bit_index = 0;
+ u32 a_index = 0;
+
+ if (!plat_dat->axi) {
+ plat_dat->axi = kzalloc(sizeof(struct stmmac_axi), GFP_KERNEL);
+
+ if (!plat_dat->axi)
+ return -ENOMEM;
+ }
+
+ plat_dat->axi->axi_lpi_en = device_property_read_bool(dev,
+ "snps,en-lpi");
+ if (device_property_read_u32(dev, "snps,write-requests",
+ &plat_dat->axi->axi_wr_osr_lmt)) {
+ /* The register has a reset value of 1, so default to 1 when
+ * the property is missing.
+ */
+ plat_dat->axi->axi_wr_osr_lmt = 1;
+ } else {
+ /* If the property exists, subtract one after parsing to keep
+ * the behavior inherited from the dwc_eth_qos driver.
+ */
+ plat_dat->axi->axi_wr_osr_lmt--;
+ }
+
+ if (device_property_read_u32(dev, "snps,read-requests",
+ &plat_dat->axi->axi_rd_osr_lmt)) {
+ /* The register has a reset value of 1, so default to 1 when
+ * the property is missing.
+ */
+ plat_dat->axi->axi_rd_osr_lmt = 1;
+ } else {
+ /* If the property exists, subtract one after parsing to keep
+ * the behavior inherited from the dwc_eth_qos driver.
+ */
+ plat_dat->axi->axi_rd_osr_lmt--;
+ }
+ device_property_read_u32(dev, "snps,burst-map", &burst_map);
+
+ /* Convert the burst-map bitmask into the AXI burst length array */
+ for (bit_index = 0; bit_index < 7; bit_index++) {
+ if (burst_map & (1 << bit_index)) {
+ switch (bit_index) {
+ case 0:
+ plat_dat->axi->axi_blen[a_index] = 4; break;
+ case 1:
+ plat_dat->axi->axi_blen[a_index] = 8; break;
+ case 2:
+ plat_dat->axi->axi_blen[a_index] = 16; break;
+ case 3:
+ plat_dat->axi->axi_blen[a_index] = 32; break;
+ case 4:
+ plat_dat->axi->axi_blen[a_index] = 64; break;
+ case 5:
+ plat_dat->axi->axi_blen[a_index] = 128; break;
+ case 6:
+ plat_dat->axi->axi_blen[a_index] = 256; break;
+ default:
+ break;
+ }
+ a_index++;
+ }
+ }
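+ /* e.g. burst_map == 0x7 yields axi_blen = { 4, 8, 16 } */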
+
+ /* dwc-qos needs GMAC4, AAL, TSO and PMT */
+ plat_dat->has_gmac4 = 1;
+ plat_dat->dma_cfg->aal = 1;
+ plat_dat->tso_en = 1;
+ plat_dat->pmt = 1;
+
+ return 0;
+}
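+
+/* Illustrative device-tree fragment for the properties parsed above
+ * (node name and values are examples only, not from a real board):
+ *
+ *	ethernet@40010000 {
+ *		compatible = "snps,dwc-qos-ethernet-4.10";
+ *		snps,en-lpi;
+ *		snps,write-requests = <2>;
+ *		snps,read-requests = <16>;
+ *		snps,burst-map = <0x7>;
+ *	};
+ */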
+
+static int dwc_qos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *stmmac_res)
+{
+ int err;
+
+ plat_dat->stmmac_clk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(plat_dat->stmmac_clk)) {
+ dev_err(&pdev->dev, "apb_pclk clock not found.\n");
+ return PTR_ERR(plat_dat->stmmac_clk);
+ }
+
+ err = clk_prepare_enable(plat_dat->stmmac_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable apb_pclk clock: %d\n",
+ err);
+ return err;
+ }
+
+ plat_dat->pclk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(plat_dat->pclk)) {
+ dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
+ err = PTR_ERR(plat_dat->pclk);
+ goto disable;
+ }
+
+ err = clk_prepare_enable(plat_dat->pclk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n",
+ err);
+ goto disable;
+ }
+
+ return 0;
+
+disable:
+ clk_disable_unprepare(plat_dat->stmmac_clk);
+ return err;
+}
+
+static int dwc_qos_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(priv->plat->pclk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+#define SDMEMCOMPPADCTRL 0x8800
+#define SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD BIT(31)
+
+#define AUTO_CAL_CONFIG 0x8804
+#define AUTO_CAL_CONFIG_START BIT(31)
+#define AUTO_CAL_CONFIG_ENABLE BIT(29)
+
+#define AUTO_CAL_STATUS 0x880c
+#define AUTO_CAL_STATUS_ACTIVE BIT(31)
+
+static void tegra_eqos_fix_speed(void *priv, unsigned int speed)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate = 125000000;
+ bool needs_calibration = false;
+ u32 value;
+ int err;
+
+ switch (speed) {
+ case SPEED_1000:
+ needs_calibration = true;
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ needs_calibration = true;
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(eqos->dev, "invalid speed %u\n", speed);
+ break;
+ }
+
+ if (needs_calibration) {
+ /* calibrate */
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value |= SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+
+ udelay(1);
+
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value |= AUTO_CAL_CONFIG_START | AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ value & AUTO_CAL_STATUS_ACTIVE,
+ 1, 10);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration did not start\n");
+ goto failed;
+ }
+
+ err = readl_poll_timeout_atomic(eqos->regs + AUTO_CAL_STATUS,
+ value,
+ (value & AUTO_CAL_STATUS_ACTIVE) == 0,
+ 20, 200);
+ if (err < 0) {
+ dev_err(eqos->dev, "calibration did not finish\n");
+ goto failed;
+ }
+
+ failed:
+ value = readl(eqos->regs + SDMEMCOMPPADCTRL);
+ value &= ~SDMEMCOMPPADCTRL_PAD_E_INPUT_OR_E_PWRD;
+ writel(value, eqos->regs + SDMEMCOMPPADCTRL);
+ } else {
+ value = readl(eqos->regs + AUTO_CAL_CONFIG);
+ value &= ~AUTO_CAL_CONFIG_ENABLE;
+ writel(value, eqos->regs + AUTO_CAL_CONFIG);
+ }
+
+ err = clk_set_rate(eqos->clk_tx, rate);
+ if (err < 0)
+ dev_err(eqos->dev, "failed to set TX rate: %d\n", err);
+}
+
+static int tegra_eqos_init(struct platform_device *pdev, void *priv)
+{
+ struct tegra_eqos *eqos = priv;
+ unsigned long rate;
+ u32 value;
+
+ rate = clk_get_rate(eqos->clk_slave);
+
+ value = (rate / 1000000) - 1;
+ writel(value, eqos->regs + GMAC_1US_TIC_COUNTER);
+
+ return 0;
+}
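+
+/* Illustrative: with a 125 MHz slave-bus clock, tegra_eqos_init()
+ * programs the 1 us tic counter to 125000000 / 1000000 - 1 = 124.
+ */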
+
+static int tegra_eqos_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res)
+{
+ struct device *dev = &pdev->dev;
+ struct tegra_eqos *eqos;
+ int err;
+
+ eqos = devm_kzalloc(&pdev->dev, sizeof(*eqos), GFP_KERNEL);
+ if (!eqos)
+ return -ENOMEM;
+
+ eqos->dev = &pdev->dev;
+ eqos->regs = res->addr;
+
+ if (!is_of_node(dev->fwnode))
+ goto bypass_clk_reset_gpio;
+
+ eqos->clk_master = devm_clk_get(&pdev->dev, "master_bus");
+ if (IS_ERR(eqos->clk_master)) {
+ err = PTR_ERR(eqos->clk_master);
+ goto error;
+ }
+
+ err = clk_prepare_enable(eqos->clk_master);
+ if (err < 0)
+ goto error;
+
+ eqos->clk_slave = devm_clk_get(&pdev->dev, "slave_bus");
+ if (IS_ERR(eqos->clk_slave)) {
+ err = PTR_ERR(eqos->clk_slave);
+ goto disable_master;
+ }
+
+ data->stmmac_clk = eqos->clk_slave;
+
+ err = clk_prepare_enable(eqos->clk_slave);
+ if (err < 0)
+ goto disable_master;
+
+ eqos->clk_rx = devm_clk_get(&pdev->dev, "rx");
+ if (IS_ERR(eqos->clk_rx)) {
+ err = PTR_ERR(eqos->clk_rx);
+ goto disable_slave;
+ }
+
+ err = clk_prepare_enable(eqos->clk_rx);
+ if (err < 0)
+ goto disable_slave;
+
+ eqos->clk_tx = devm_clk_get(&pdev->dev, "tx");
+ if (IS_ERR(eqos->clk_tx)) {
+ err = PTR_ERR(eqos->clk_tx);
+ goto disable_rx;
+ }
+
+ err = clk_prepare_enable(eqos->clk_tx);
+ if (err < 0)
+ goto disable_rx;
+
+ eqos->reset = devm_gpiod_get(&pdev->dev, "phy-reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(eqos->reset)) {
+ err = PTR_ERR(eqos->reset);
+ goto disable_tx;
+ }
+
+ usleep_range(2000, 4000);
+ gpiod_set_value(eqos->reset, 0);
+
+ /* MDIO bus was already reset just above */
+ data->mdio_bus_data->needs_reset = false;
+
+ eqos->rst = devm_reset_control_get(&pdev->dev, "eqos");
+ if (IS_ERR(eqos->rst)) {
+ err = PTR_ERR(eqos->rst);
+ goto reset_phy;
+ }
+
+ err = reset_control_assert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+ err = reset_control_deassert(eqos->rst);
+ if (err < 0)
+ goto reset_phy;
+
+ usleep_range(2000, 4000);
+
+bypass_clk_reset_gpio:
+ data->fix_mac_speed = tegra_eqos_fix_speed;
+ data->init = tegra_eqos_init;
+ data->bsp_priv = eqos;
+ data->sph_disable = 1;
+
+ err = tegra_eqos_init(pdev, eqos);
+ if (err < 0)
+ goto reset;
+
+ return 0;
+reset:
+ reset_control_assert(eqos->rst);
+reset_phy:
+ gpiod_set_value(eqos->reset, 1);
+disable_tx:
+ clk_disable_unprepare(eqos->clk_tx);
+disable_rx:
+ clk_disable_unprepare(eqos->clk_rx);
+disable_slave:
+ clk_disable_unprepare(eqos->clk_slave);
+disable_master:
+ clk_disable_unprepare(eqos->clk_master);
+error:
+ return err;
+}
+
+static int tegra_eqos_remove(struct platform_device *pdev)
+{
+ struct tegra_eqos *eqos = get_stmmac_bsp_priv(&pdev->dev);
+
+ reset_control_assert(eqos->rst);
+ gpiod_set_value(eqos->reset, 1);
+ clk_disable_unprepare(eqos->clk_tx);
+ clk_disable_unprepare(eqos->clk_rx);
+ clk_disable_unprepare(eqos->clk_slave);
+ clk_disable_unprepare(eqos->clk_master);
+
+ return 0;
+}
+
+struct dwc_eth_dwmac_data {
+ int (*probe)(struct platform_device *pdev,
+ struct plat_stmmacenet_data *data,
+ struct stmmac_resources *res);
+ int (*remove)(struct platform_device *pdev);
+};
+
+static const struct dwc_eth_dwmac_data dwc_qos_data = {
+ .probe = dwc_qos_probe,
+ .remove = dwc_qos_remove,
+};
+
+static const struct dwc_eth_dwmac_data tegra_eqos_data = {
+ .probe = tegra_eqos_probe,
+ .remove = tegra_eqos_remove,
+};
+
+static int dwc_eth_dwmac_probe(struct platform_device *pdev)
+{
+ const struct dwc_eth_dwmac_data *data;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ data = device_get_match_data(&pdev->dev);
+
+ memset(&stmmac_res, 0, sizeof(struct stmmac_resources));
+
+ /* Since stmmac_platform supports named IRQ resources only, basic
+ * platform resource initialization is done here in the glue logic.
+ */
+ stmmac_res.irq = platform_get_irq(pdev, 0);
+ if (stmmac_res.irq < 0)
+ return stmmac_res.irq;
+ stmmac_res.wol_irq = stmmac_res.irq;
+
+ stmmac_res.addr = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(stmmac_res.addr))
+ return PTR_ERR(stmmac_res.addr);
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ ret = data->probe(pdev, plat_dat, &stmmac_res);
+ if (ret < 0) {
+ dev_err_probe(&pdev->dev, ret, "failed to probe subdriver\n");
+
+ goto remove_config;
+ }
+
+ ret = dwc_eth_dwmac_config_dt(pdev, plat_dat);
+ if (ret)
+ goto remove;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto remove;
+
+ return ret;
+
+remove:
+ data->remove(pdev);
+remove_config:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int dwc_eth_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ const struct dwc_eth_dwmac_data *data;
+ int err;
+
+ data = device_get_match_data(&pdev->dev);
+
+ err = stmmac_dvr_remove(&pdev->dev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = data->remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove subdriver: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
+}
+
+static const struct of_device_id dwc_eth_dwmac_match[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10", .data = &dwc_qos_data },
+ { .compatible = "nvidia,tegra186-eqos", .data = &tegra_eqos_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwc_eth_dwmac_match);
+
+static struct platform_driver dwc_eth_dwmac_driver = {
+ .probe = dwc_eth_dwmac_probe,
+ .remove = dwc_eth_dwmac_remove,
+ .driver = {
+ .name = "dwc-eth-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = dwc_eth_dwmac_match,
+ },
+};
+module_platform_driver(dwc_eth_dwmac_driver);
+
+MODULE_AUTHOR("Joao Pinto <jpinto@synopsys.com>");
+MODULE_DESCRIPTION("Synopsys DWC Ethernet Quality-of-Service v4.10a driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
new file mode 100644
index 000000000..5e731a72c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-generic.c
@@ -0,0 +1,100 @@
+/*
+ * Generic DWMAC platform driver
+ *
+ * Copyright (C) 2007-2011 STMicroelectronics Ltd
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+static int dwmac_generic_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ if (pdev->dev.of_node) {
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+ } else {
+ plat_dat = dev_get_platdata(&pdev->dev);
+ if (!plat_dat) {
+ dev_err(&pdev->dev, "no platform data provided\n");
+ return -EINVAL;
+ }
+
+ /* Set default value for multicast hash bins */
+ plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat_dat->unicast_filter_entries = 1;
+ }
+
+ /* Custom initialisation (if needed) */
+ if (plat_dat->init) {
+ ret = plat_dat->init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ goto err_remove_config_dt;
+ }
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_exit;
+
+ return 0;
+
+err_exit:
+ if (plat_dat->exit)
+ plat_dat->exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ if (pdev->dev.of_node)
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static const struct of_device_id dwmac_generic_match[] = {
+ { .compatible = "st,spear600-gmac"},
+ { .compatible = "snps,dwmac-3.40a"},
+ { .compatible = "snps,dwmac-3.50a"},
+ { .compatible = "snps,dwmac-3.610"},
+ { .compatible = "snps,dwmac-3.70a"},
+ { .compatible = "snps,dwmac-3.710"},
+ { .compatible = "snps,dwmac-4.00"},
+ { .compatible = "snps,dwmac-4.10a"},
+ { .compatible = "snps,dwmac"},
+ { .compatible = "snps,dwxgmac-2.10"},
+ { .compatible = "snps,dwxgmac"},
+ { }
+};
+MODULE_DEVICE_TABLE(of, dwmac_generic_match);
+
+static struct platform_driver dwmac_generic_driver = {
+ .probe = dwmac_generic_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(dwmac_generic_match),
+ },
+};
+module_platform_driver(dwmac_generic_driver);
+
+MODULE_DESCRIPTION("Generic dwmac driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
new file mode 100644
index 000000000..0d6a84199
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c
@@ -0,0 +1,319 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwmac-imx.c - DWMAC Specific Glue layer for NXP imx8
+ *
+ * Copyright 2020 NXP
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define GPR_ENET_QOS_INTF_MODE_MASK GENMASK(21, 16)
+#define GPR_ENET_QOS_INTF_SEL_MII (0x0 << 16)
+#define GPR_ENET_QOS_INTF_SEL_RMII (0x4 << 16)
+#define GPR_ENET_QOS_INTF_SEL_RGMII (0x1 << 16)
+#define GPR_ENET_QOS_CLK_GEN_EN (0x1 << 19)
+#define GPR_ENET_QOS_CLK_TX_CLK_SEL (0x1 << 20)
+#define GPR_ENET_QOS_RGMII_EN (0x1 << 21)
+
+struct imx_dwmac_ops {
+ u32 addr_width;
+ bool mac_rgmii_txclk_auto_adj;
+
+ int (*set_intf_mode)(struct plat_stmmacenet_data *plat_dat);
+};
+
+struct imx_priv_data {
+ struct device *dev;
+ struct clk *clk_tx;
+ struct clk *clk_mem;
+ struct regmap *intf_regmap;
+ u32 intf_reg_off;
+ bool rmii_refclk_ext;
+
+ const struct imx_dwmac_ops *ops;
+ struct plat_stmmacenet_data *plat_dat;
+};
+
+static int imx8mp_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct imx_priv_data *dwmac = plat_dat->bsp_priv;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = GPR_ENET_QOS_INTF_SEL_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = GPR_ENET_QOS_INTF_SEL_RMII;
+ val |= (dwmac->rmii_refclk_ext ? 0 : GPR_ENET_QOS_CLK_TX_CLK_SEL);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = GPR_ENET_QOS_INTF_SEL_RGMII |
+ GPR_ENET_QOS_RGMII_EN;
+ break;
+ default:
+ pr_debug("imx dwmac doesn't support %d interface\n",
+ plat_dat->interface);
+ return -EINVAL;
+ }
+
+ val |= GPR_ENET_QOS_CLK_GEN_EN;
+ return regmap_update_bits(dwmac->intf_regmap, dwmac->intf_reg_off,
+ GPR_ENET_QOS_INTF_MODE_MASK, val);
+}
+
+static int
+imx8dxl_set_intf_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ /* TBD: depends on imx8dxl scu interfaces to be upstreamed */
+ return 0;
+}
+
+static int imx_dwmac_clks_config(void *priv, bool enabled)
+{
+ struct imx_priv_data *dwmac = priv;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(dwmac->clk_mem);
+ if (ret) {
+ dev_err(dwmac->dev, "mem clock enable failed\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret) {
+ dev_err(dwmac->dev, "tx clock enable failed\n");
+ clk_disable_unprepare(dwmac->clk_mem);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_mem);
+ }
+
+ return ret;
+}
+
+static int imx_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct imx_priv_data *dwmac = priv;
+ int ret;
+
+ plat_dat = dwmac->plat_dat;
+
+ if (dwmac->ops->set_intf_mode) {
+ ret = dwmac->ops->set_intf_mode(plat_dat);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void imx_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ /* nothing to do now */
+}
+
+static void imx_dwmac_fix_speed(void *priv, unsigned int speed)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct imx_priv_data *dwmac = priv;
+ unsigned long rate;
+ int err;
+
+ plat_dat = dwmac->plat_dat;
+
+ if (dwmac->ops->mac_rgmii_txclk_auto_adj ||
+ (plat_dat->interface == PHY_INTERFACE_MODE_RMII) ||
+ (plat_dat->interface == PHY_INTERFACE_MODE_MII))
+ return;
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+ case SPEED_100:
+ rate = 25000000;
+ break;
+ case SPEED_10:
+ rate = 2500000;
+ break;
+ default:
+ dev_err(dwmac->dev, "invalid speed %u\n", speed);
+ return;
+ }
+
+ err = clk_set_rate(dwmac->clk_tx, rate);
+ if (err < 0)
+ dev_err(dwmac->dev, "failed to set tx rate %lu\n", rate);
+}
+
+static int
+imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int err = 0;
+
+ if (of_get_property(np, "snps,rmii_refclk_ext", NULL))
+ dwmac->rmii_refclk_ext = true;
+
+ dwmac->clk_tx = devm_clk_get(dev, "tx");
+ if (IS_ERR(dwmac->clk_tx)) {
+ dev_err(dev, "failed to get tx clock\n");
+ return PTR_ERR(dwmac->clk_tx);
+ }
+
+ dwmac->clk_mem = NULL;
+ if (of_machine_is_compatible("fsl,imx8dxl")) {
+ dwmac->clk_mem = devm_clk_get(dev, "mem");
+ if (IS_ERR(dwmac->clk_mem)) {
+ dev_err(dev, "failed to get mem clock\n");
+ return PTR_ERR(dwmac->clk_mem);
+ }
+ }
+
+ if (of_machine_is_compatible("fsl,imx8mp")) {
+ /* Per the binding document, the "intf_mode" property is
+ * required by i.MX8MP and optional for i.MX8DXL.
+ */
+ dwmac->intf_regmap = syscon_regmap_lookup_by_phandle(np, "intf_mode");
+ if (IS_ERR(dwmac->intf_regmap))
+ return PTR_ERR(dwmac->intf_regmap);
+
+ err = of_property_read_u32_index(np, "intf_mode", 1, &dwmac->intf_reg_off);
+ if (err) {
+ dev_err(dev, "Can't get intf mode reg offset (%d)\n", err);
+ return err;
+ }
+ }
+
+ return err;
+}
+
+static int imx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct imx_priv_data *dwmac;
+ const struct imx_dwmac_ops *data;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return -ENOMEM;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "failed to get match data\n");
+ ret = -EINVAL;
+ goto err_match_data;
+ }
+
+ dwmac->ops = data;
+ dwmac->dev = &pdev->dev;
+
+ ret = imx_dwmac_parse_dt(dwmac, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to parse OF data\n");
+ goto err_parse_dt;
+ }
+
+ plat_dat->host_dma_width = dwmac->ops->addr_width;
+ plat_dat->init = imx_dwmac_init;
+ plat_dat->exit = imx_dwmac_exit;
+ plat_dat->clks_config = imx_dwmac_clks_config;
+ plat_dat->fix_mac_speed = imx_dwmac_fix_speed;
+ plat_dat->bsp_priv = dwmac;
+ dwmac->plat_dat = plat_dat;
+
+ ret = imx_dwmac_clks_config(dwmac, true);
+ if (ret)
+ goto err_clks_config;
+
+ ret = imx_dwmac_init(pdev, dwmac);
+ if (ret)
+ goto err_dwmac_init;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_drv_probe;
+
+ return 0;
+
+err_drv_probe:
+ imx_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_dwmac_init:
+ imx_dwmac_clks_config(dwmac, false);
+err_clks_config:
+err_parse_dt:
+err_match_data:
+ stmmac_remove_config_dt(pdev, plat_dat);
+ return ret;
+}
+
+static struct imx_dwmac_ops imx8mp_dwmac_data = {
+ .addr_width = 34,
+ .mac_rgmii_txclk_auto_adj = false,
+ .set_intf_mode = imx8mp_set_intf_mode,
+};
+
+static struct imx_dwmac_ops imx8dxl_dwmac_data = {
+ .addr_width = 32,
+ .mac_rgmii_txclk_auto_adj = true,
+ .set_intf_mode = imx8dxl_set_intf_mode,
+};
+
+static const struct of_device_id imx_dwmac_match[] = {
+ { .compatible = "nxp,imx8mp-dwmac-eqos", .data = &imx8mp_dwmac_data },
+ { .compatible = "nxp,imx8dxl-dwmac-eqos", .data = &imx8dxl_dwmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, imx_dwmac_match);
+
+static struct platform_driver imx_dwmac_driver = {
+ .probe = imx_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "imx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = imx_dwmac_match,
+ },
+};
+module_platform_driver(imx_dwmac_driver);
+
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("NXP imx8 DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
new file mode 100644
index 000000000..378b4dd82
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ingenic.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dwmac-ingenic.c - Ingenic SoCs DWMAC specific glue layer
+ *
+ * Copyright (c) 2021 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define MACPHYC_TXCLK_SEL_MASK GENMASK(31, 31)
+#define MACPHYC_TXCLK_SEL_OUTPUT 0x1
+#define MACPHYC_TXCLK_SEL_INPUT 0x0
+#define MACPHYC_MODE_SEL_MASK GENMASK(31, 31)
+#define MACPHYC_MODE_SEL_RMII 0x0
+#define MACPHYC_TX_SEL_MASK GENMASK(19, 19)
+#define MACPHYC_TX_SEL_ORIGIN 0x0
+#define MACPHYC_TX_SEL_DELAY 0x1
+#define MACPHYC_TX_DELAY_MASK GENMASK(18, 12)
+#define MACPHYC_RX_SEL_MASK GENMASK(11, 11)
+#define MACPHYC_RX_SEL_ORIGIN 0x0
+#define MACPHYC_RX_SEL_DELAY 0x1
+#define MACPHYC_RX_DELAY_MASK GENMASK(10, 4)
+#define MACPHYC_SOFT_RST_MASK GENMASK(3, 3)
+#define MACPHYC_PHY_INFT_MASK GENMASK(2, 0)
+#define MACPHYC_PHY_INFT_RMII 0x4
+#define MACPHYC_PHY_INFT_RGMII 0x1
+#define MACPHYC_PHY_INFT_GMII 0x0
+#define MACPHYC_PHY_INFT_MII 0x0
+
+#define MACPHYC_TX_DELAY_PS_MAX 2496
+#define MACPHYC_TX_DELAY_PS_MIN 20
+
+#define MACPHYC_RX_DELAY_PS_MAX 2496
+#define MACPHYC_RX_DELAY_PS_MIN 20
+
+enum ingenic_mac_version {
+ ID_JZ4775,
+ ID_X1000,
+ ID_X1600,
+ ID_X1830,
+ ID_X2000,
+};
+
+struct ingenic_mac {
+ const struct ingenic_soc_info *soc_info;
+ struct device *dev;
+ struct regmap *regmap;
+
+ int rx_delay;
+ int tx_delay;
+};
+
+struct ingenic_soc_info {
+ enum ingenic_mac_version version;
+ u32 mask;
+
+ int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
+};
+
+static int ingenic_mac_init(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ int ret;
+
+ if (mac->soc_info->set_mode) {
+ ret = mac->soc_info->set_mode(plat_dat);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int jz4775_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_MII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_MII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_GMII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_GMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_GMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = FIELD_PREP(MACPHYC_TXCLK_SEL_MASK, MACPHYC_TXCLK_SEL_INPUT) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x1000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, 0);
+}
+
+static int x1600_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x1830_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_MODE_SEL_MASK, MACPHYC_MODE_SEL_RMII) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
+
+static int x2000_mac_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct ingenic_mac *mac = plat_dat->bsp_priv;
+ unsigned int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_RMII:
+ val = FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN) |
+ FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RMII);
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RMII\n");
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ val = FIELD_PREP(MACPHYC_PHY_INFT_MASK, MACPHYC_PHY_INFT_RGMII);
+
+ if (mac->tx_delay == 0)
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_ORIGIN);
+ else
+ val |= FIELD_PREP(MACPHYC_TX_SEL_MASK, MACPHYC_TX_SEL_DELAY) |
+ FIELD_PREP(MACPHYC_TX_DELAY_MASK, (mac->tx_delay + 9750) / 19500 - 1);
+
+ if (mac->rx_delay == 0)
+ val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_ORIGIN);
+ else
+ val |= FIELD_PREP(MACPHYC_RX_SEL_MASK, MACPHYC_RX_SEL_DELAY) |
+ FIELD_PREP(MACPHYC_RX_DELAY_MASK, (mac->rx_delay + 9750) / 19500 - 1);
+
+ dev_dbg(mac->dev, "MAC PHY Control Register: PHY_INTERFACE_MODE_RGMII\n");
+ break;
+
+ default:
+ dev_err(mac->dev, "Unsupported interface %d\n", plat_dat->interface);
+ return -EINVAL;
+ }
+
+ /* Update MAC PHY control register */
+ return regmap_update_bits(mac->regmap, 0, mac->soc_info->mask, val);
+}
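+
+/* Illustrative reading of the delay encoding above: the 7-bit delay
+ * fields step in units of 19.5 ps (19500 in the scaled units used for
+ * mac->tx_delay/rx_delay), and the + 9750 rounds to the nearest tap.
+ * E.g. tx-clk-delay-ps = 1000 gives (1000000 + 9750) / 19500 - 1 = 50,
+ * i.e. (50 + 1) * 19.5 ps ~= 994.5 ps of actual delay.
+ */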
+
+static int ingenic_mac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct ingenic_mac *mac;
+ const struct ingenic_soc_info *data;
+ u32 tx_delay_ps, rx_delay_ps;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ mac = devm_kzalloc(&pdev->dev, sizeof(*mac), GFP_KERNEL);
+ if (!mac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No OF match data provided\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ /* Get MAC PHY control register */
+ mac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "mode-reg");
+ if (IS_ERR(mac->regmap)) {
+ dev_err(&pdev->dev, "%s: Failed to get syscon regmap\n", __func__);
+ ret = PTR_ERR(mac->regmap);
+ goto err_remove_config_dt;
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "tx-clk-delay-ps", &tx_delay_ps)) {
+ if (tx_delay_ps >= MACPHYC_TX_DELAY_PS_MIN &&
+ tx_delay_ps <= MACPHYC_TX_DELAY_PS_MAX) {
+ mac->tx_delay = tx_delay_ps * 1000;
+ } else {
+ dev_err(&pdev->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ if (!of_property_read_u32(pdev->dev.of_node, "rx-clk-delay-ps", &rx_delay_ps)) {
+ if (rx_delay_ps >= MACPHYC_RX_DELAY_PS_MIN &&
+ rx_delay_ps <= MACPHYC_RX_DELAY_PS_MAX) {
+ mac->rx_delay = rx_delay_ps * 1000;
+ } else {
+ dev_err(&pdev->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ mac->soc_info = data;
+ mac->dev = &pdev->dev;
+
+ plat_dat->bsp_priv = mac;
+
+ ret = ingenic_mac_init(plat_dat);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int ingenic_mac_suspend(struct device *dev)
+{
+ return stmmac_suspend(dev);
+}
+
+static int ingenic_mac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = ingenic_mac_init(priv->plat);
+ if (ret)
+ return ret;
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(ingenic_mac_pm_ops, ingenic_mac_suspend, ingenic_mac_resume);
+
+static struct ingenic_soc_info jz4775_soc_info = {
+ .version = ID_JZ4775,
+ .mask = MACPHYC_TXCLK_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = jz4775_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1000_soc_info = {
+ .version = ID_X1000,
+ .mask = MACPHYC_SOFT_RST_MASK,
+
+ .set_mode = x1000_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1600_soc_info = {
+ .version = ID_X1600,
+ .mask = MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x1600_mac_set_mode,
+};
+
+static struct ingenic_soc_info x1830_soc_info = {
+ .version = ID_X1830,
+ .mask = MACPHYC_MODE_SEL_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x1830_mac_set_mode,
+};
+
+static struct ingenic_soc_info x2000_soc_info = {
+ .version = ID_X2000,
+ .mask = MACPHYC_TX_SEL_MASK | MACPHYC_TX_DELAY_MASK | MACPHYC_RX_SEL_MASK |
+ MACPHYC_RX_DELAY_MASK | MACPHYC_SOFT_RST_MASK | MACPHYC_PHY_INFT_MASK,
+
+ .set_mode = x2000_mac_set_mode,
+};
+
+static const struct of_device_id ingenic_mac_of_matches[] = {
+ { .compatible = "ingenic,jz4775-mac", .data = &jz4775_soc_info },
+ { .compatible = "ingenic,x1000-mac", .data = &x1000_soc_info },
+ { .compatible = "ingenic,x1600-mac", .data = &x1600_soc_info },
+ { .compatible = "ingenic,x1830-mac", .data = &x1830_soc_info },
+ { .compatible = "ingenic,x2000-mac", .data = &x2000_soc_info },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ingenic_mac_of_matches);
+
+static struct platform_driver ingenic_mac_driver = {
+ .probe = ingenic_mac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ingenic-mac",
+ .pm = pm_ptr(&ingenic_mac_pm_ops),
+ .of_match_table = ingenic_mac_of_matches,
+ },
+};
+module_platform_driver(ingenic_mac_driver);
+
+MODULE_AUTHOR("周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>");
+MODULE_DESCRIPTION("Ingenic SoCs DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
new file mode 100644
index 000000000..06d287f10
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Intel DWMAC platform driver
+ *
+ * Copyright(C) 2020 Intel Corporation
+ */
+
+#include <linux/ethtool.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "dwmac4.h"
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+struct intel_dwmac {
+ struct device *dev;
+ struct clk *tx_clk;
+ const struct intel_dwmac_data *data;
+};
+
+struct intel_dwmac_data {
+ void (*fix_mac_speed)(void *priv, unsigned int speed);
+ unsigned long ptp_ref_clk_rate;
+ unsigned long tx_clk_rate;
+ bool tx_clk_en;
+};
+
+static void kmb_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct intel_dwmac *dwmac = priv;
+ unsigned long rate;
+ int ret;
+
+ rate = clk_get_rate(dwmac->tx_clk);
+
+ switch (speed) {
+ case SPEED_1000:
+ rate = 125000000;
+ break;
+
+ case SPEED_100:
+ rate = 25000000;
+ break;
+
+ case SPEED_10:
+ rate = 2500000;
+ break;
+
+ default:
+ dev_err(dwmac->dev, "Invalid speed %u\n", speed);
+ break;
+ }
+
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret)
+ dev_err(dwmac->dev, "Failed to configure tx clock rate\n");
+}
+
+static const struct intel_dwmac_data kmb_data = {
+ .fix_mac_speed = kmb_eth_fix_mac_speed,
+ .ptp_ref_clk_rate = 200000000,
+ .tx_clk_rate = 125000000,
+ .tx_clk_en = true,
+};
+
+static const struct of_device_id intel_eth_plat_match[] = {
+ { .compatible = "intel,keembay-dwmac", .data = &kmb_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, intel_eth_plat_match);
+
+static int intel_eth_plat_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct of_device_id *match;
+ struct intel_dwmac *dwmac;
+ unsigned long rate;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->dev = &pdev->dev;
+ dwmac->tx_clk = NULL;
+
+ match = of_match_device(intel_eth_plat_match, &pdev->dev);
+ if (match && match->data) {
+ dwmac->data = (const struct intel_dwmac_data *)match->data;
+
+ if (dwmac->data->fix_mac_speed)
+ plat_dat->fix_mac_speed = dwmac->data->fix_mac_speed;
+
+ /* Enable TX clock */
+ if (dwmac->data->tx_clk_en) {
+ dwmac->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+ if (IS_ERR(dwmac->tx_clk)) {
+ ret = PTR_ERR(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ ret = clk_prepare_enable(dwmac->tx_clk);
+ if (ret)
+ goto err_remove_config_dt;
+
+ /* Check and configure TX clock rate */
+ rate = clk_get_rate(dwmac->tx_clk);
+ if (dwmac->data->tx_clk_rate &&
+ rate != dwmac->data->tx_clk_rate) {
+ rate = dwmac->data->tx_clk_rate;
+ ret = clk_set_rate(dwmac->tx_clk, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set tx_clk\n");
+ goto err_remove_config_dt;
+ }
+ }
+ }
+
+ /* Check and configure PTP ref clock rate */
+ rate = clk_get_rate(plat_dat->clk_ptp_ref);
+ if (dwmac->data->ptp_ref_clk_rate &&
+ rate != dwmac->data->ptp_ref_clk_rate) {
+ rate = dwmac->data->ptp_ref_clk_rate;
+ ret = clk_set_rate(plat_dat->clk_ptp_ref, rate);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to set clk_ptp_ref\n");
+ goto err_remove_config_dt;
+ }
+ }
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->eee_usecs_rate = plat_dat->clk_ptp_rate;
+
+ if (plat_dat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat_dat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, stmmac_res.addr + GMAC_1US_TIC_COUNTER);
+ }
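+
+ /* Illustrative: a 200 MHz PTP ref clock gives
+ * tx_lpi_usec = 200000000 / 1000000 - 1 = 199.
+ */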
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret) {
+ clk_disable_unprepare(dwmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int intel_eth_plat_remove(struct platform_device *pdev)
+{
+ struct intel_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ clk_disable_unprepare(dwmac->tx_clk);
+
+ return ret;
+}
+
+static struct platform_driver intel_eth_plat_driver = {
+ .probe = intel_eth_plat_probe,
+ .remove = intel_eth_plat_remove,
+ .driver = {
+ .name = "intel-eth-plat",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = intel_eth_plat_match,
+ },
+};
+module_platform_driver(intel_eth_plat_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel DWMAC platform driver");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
new file mode 100644
index 000000000..ab9f876b6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c
@@ -0,0 +1,1241 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Intel Corporation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include "dwmac-intel.h"
+#include "dwmac4.h"
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+struct intel_priv_data {
+ int mdio_adhoc_addr; /* mdio address for serdes & etc */
+ unsigned long crossts_adj;
+ bool is_pse;
+};
+
+/* This struct associates the PCI function of a MAC controller on a
+ * board, discovered via DMI, with the address of the PHY connected to
+ * that MAC. A negative address means the MAC controller is not
+ * connected to a PHY.
+ */
+struct stmmac_pci_func_data {
+ unsigned int func;
+ int phy_addr;
+};
+
+struct stmmac_pci_dmi_data {
+ const struct stmmac_pci_func_data *func;
+ size_t nfuncs;
+};
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
+
+static int stmmac_pci_find_phy_addr(struct pci_dev *pdev,
+ const struct dmi_system_id *dmi_list)
+{
+ const struct stmmac_pci_func_data *func_data;
+ const struct stmmac_pci_dmi_data *dmi_data;
+ const struct dmi_system_id *dmi_id;
+ int func = PCI_FUNC(pdev->devfn);
+ size_t n;
+
+ dmi_id = dmi_first_match(dmi_list);
+ if (!dmi_id)
+ return -ENODEV;
+
+ dmi_data = dmi_id->driver_data;
+ func_data = dmi_data->func;
+
+ for (n = 0; n < dmi_data->nfuncs; n++, func_data++)
+ if (func_data->func == func)
+ return func_data->phy_addr;
+
+ return -ENODEV;
+}
+
+static int serdes_status_poll(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 mask, u32 val)
+{
+ unsigned int retries = 10;
+ int val_rd;
+
+ do {
+ val_rd = mdiobus_read(priv->mii, phyaddr, phyreg);
+ if ((val_rd & mask) == (val & mask))
+ return 0;
+ udelay(POLL_DELAY_US);
+ } while (--retries);
+
+ return -ETIMEDOUT;
+}
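+
+/* Note: serdes_status_poll() gives up after 10 polls of POLL_DELAY_US
+ * each (roughly 10 * POLL_DELAY_US of waiting) and returns -ETIMEDOUT.
+ */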
+
+static int intel_serdes_powerup(struct net_device *ndev, void *priv_data)
+{
+ struct intel_priv_data *intel_priv = priv_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ if (!intel_priv->mdio_adhoc_addr)
+ return 0;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* Set the serdes rate and the PCLK rate */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR0);
+
+ data &= ~SERDES_RATE_MASK;
+ data &= ~SERDES_PCLK_MASK;
+
+ if (priv->plat->max_speed == 2500)
+ data |= SERDES_RATE_PCIE_GEN2 << SERDES_RATE_PCIE_SHIFT |
+ SERDES_PCLK_37p5MHZ << SERDES_PCLK_SHIFT;
+ else
+ data |= SERDES_RATE_PCIE_GEN1 << SERDES_RATE_PCIE_SHIFT |
+ SERDES_PCLK_70MHZ << SERDES_PCLK_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* assert clk_req */
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
+ data |= SERDES_PLL_CLK;
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* check for clk_ack assertion */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PLL_CLK,
+ SERDES_PLL_CLK);
+
+ if (data) {
+ dev_err(priv->device, "Serdes PLL clk request timeout\n");
+ return data;
+ }
+
+ /* assert lane reset */
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
+ data |= SERDES_RST;
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* check for assert lane reset reflection */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_RST,
+ SERDES_RST);
+
+ if (data) {
+ dev_err(priv->device, "Serdes assert lane reset timeout\n");
+ return data;
+ }
+
+ /* move power state to P0 */
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
+
+ data &= ~SERDES_PWR_ST_MASK;
+ data |= SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* Check for P0 state */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PWR_ST_MASK,
+ SERDES_PWR_ST_P0 << SERDES_PWR_ST_SHIFT);
+
+ if (data) {
+ dev_err(priv->device, "Serdes power state P0 timeout.\n");
+ return data;
+ }
+
+ /* PSE only - ungate SGMII PHY Rx Clock */
+ if (intel_priv->is_pse)
+ mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
+ 0, SERDES_PHY_RX_CLK);
+
+ return 0;
+}
+
+static void intel_serdes_powerdown(struct net_device *ndev, void *intel_data)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ if (!intel_priv->mdio_adhoc_addr)
+ return;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* PSE only - gate SGMII PHY Rx Clock */
+ if (intel_priv->is_pse)
+ mdiobus_modify(priv->mii, serdes_phy_addr, SERDES_GCR0,
+ SERDES_PHY_RX_CLK, 0);
+
+ /* move power state to P3 */
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
+
+ data &= ~SERDES_PWR_ST_MASK;
+ data |= SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT;
+
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* Check for P3 state */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PWR_ST_MASK,
+ SERDES_PWR_ST_P3 << SERDES_PWR_ST_SHIFT);
+
+ if (data) {
+ dev_err(priv->device, "Serdes power state P3 timeout\n");
+ return;
+ }
+
+ /* de-assert clk_req */
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
+ data &= ~SERDES_PLL_CLK;
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* check for clk_ack de-assert */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_PLL_CLK,
+ (u32)~SERDES_PLL_CLK);
+
+ if (data) {
+ dev_err(priv->device, "Serdes PLL clk de-assert timeout\n");
+ return;
+ }
+
+ /* de-assert lane reset */
+ data = mdiobus_read(priv->mii, serdes_phy_addr, SERDES_GCR0);
+ data &= ~SERDES_RST;
+ mdiobus_write(priv->mii, serdes_phy_addr, SERDES_GCR0, data);
+
+ /* check for de-assert lane reset reflection */
+ data = serdes_status_poll(priv, serdes_phy_addr,
+ SERDES_GSR0,
+ SERDES_RST,
+ (u32)~SERDES_RST);
+
+ if (data) {
+ dev_err(priv->device, "Serdes de-assert lane reset timeout\n");
+ return;
+ }
+}
+
+static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data)
+{
+ struct intel_priv_data *intel_priv = intel_data;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int serdes_phy_addr = 0;
+ u32 data = 0;
+
+ serdes_phy_addr = intel_priv->mdio_adhoc_addr;
+
+ /* Determine the link speed mode: 2.5Gbps/1Gbps */
+ data = mdiobus_read(priv->mii, serdes_phy_addr,
+ SERDES_GCR);
+
+ if (((data & SERDES_LINK_MODE_MASK) >> SERDES_LINK_MODE_SHIFT) ==
+ SERDES_LINK_MODE_2G5) {
+ dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n");
+ priv->plat->max_speed = 2500;
+ priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX;
+ priv->plat->mdio_bus_data->xpcs_an_inband = false;
+ } else {
+ priv->plat->max_speed = 1000;
+ }
+}
+
+/* Program the PTP clock frequency for the different Intel mGBE
+ * variants, which have slightly different GPO mappings.
+ */
+static void intel_mgbe_ptp_clk_freq_config(void *npriv)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)npriv;
+ struct intel_priv_data *intel_priv;
+ u32 gpio_value;
+
+ intel_priv = (struct intel_priv_data *)priv->plat->bsp_priv;
+
+ gpio_value = readl(priv->ioaddr + GMAC_GPIO_STATUS);
+
+ if (intel_priv->is_pse) {
+ /* For PSE GbE, use 200MHz */
+ gpio_value &= ~PSE_PTP_CLK_FREQ_MASK;
+ gpio_value |= PSE_PTP_CLK_FREQ_200MHZ;
+ } else {
+ /* For PCH GbE, use 200MHz */
+ gpio_value &= ~PCH_PTP_CLK_FREQ_MASK;
+ gpio_value |= PCH_PTP_CLK_FREQ_200MHZ;
+ }
+
+ writel(gpio_value, priv->ioaddr + GMAC_GPIO_STATUS);
+}
+
+static void get_arttime(struct mii_bus *mii, int intel_adhoc_addr,
+ u64 *art_time)
+{
+ u64 ns;
+
+ ns = mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE3);
+ ns <<= GMAC4_ART_TIME_SHIFT;
+ ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE2);
+ ns <<= GMAC4_ART_TIME_SHIFT;
+ ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE1);
+ ns <<= GMAC4_ART_TIME_SHIFT;
+ ns |= mdiobus_read(mii, intel_adhoc_addr, PMC_ART_VALUE0);
+
+ *art_time = ns;
+}
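+
+/* The ART timestamp above is reassembled from four adhoc-PHY registers,
+ * PMC_ART_VALUE3 (most significant) down to PMC_ART_VALUE0, each read
+ * shifted in by GMAC4_ART_TIME_SHIFT bits (presumably 16, so four
+ * 16-bit reads form one 64-bit value).
+ */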
+
+static int stmmac_cross_ts_isr(struct stmmac_priv *priv)
+{
+ return (readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE);
+}
+
+static int intel_crosststamp(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct intel_priv_data *intel_priv;
+
+ struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
+ void __iomem *ptpaddr = priv->ptpaddr;
+ void __iomem *ioaddr = priv->hw->pcsr;
+ unsigned long flags;
+ u64 art_time = 0;
+ u64 ptp_time = 0;
+ u32 num_snapshot;
+ u32 gpio_value;
+ u32 acr_value;
+ int i;
+
+ if (!boot_cpu_has(X86_FEATURE_ART))
+ return -EOPNOTSUPP;
+
+ intel_priv = priv->plat->bsp_priv;
+
+ /* Internal cross-timestamping and externally triggered event
+ * timestamping cannot run concurrently.
+ */
+ if (priv->plat->ext_snapshot_en)
+ return -EBUSY;
+
+ priv->plat->int_snapshot_en = 1;
+
+ mutex_lock(&priv->aux_ts_lock);
+ /* Enable Internal snapshot trigger */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value &= ~PTP_ACR_MASK;
+ switch (priv->plat->int_snapshot_num) {
+ case AUX_SNAPSHOT0:
+ acr_value |= PTP_ACR_ATSEN0;
+ break;
+ case AUX_SNAPSHOT1:
+ acr_value |= PTP_ACR_ATSEN1;
+ break;
+ case AUX_SNAPSHOT2:
+ acr_value |= PTP_ACR_ATSEN2;
+ break;
+ case AUX_SNAPSHOT3:
+ acr_value |= PTP_ACR_ATSEN3;
+ break;
+ default:
+ mutex_unlock(&priv->aux_ts_lock);
+ priv->plat->int_snapshot_en = 0;
+ return -EINVAL;
+ }
+ writel(acr_value, ptpaddr + PTP_ACR);
+
+ /* Clear FIFO */
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value |= PTP_ACR_ATSFC;
+ writel(acr_value, ptpaddr + PTP_ACR);
+ /* Release the mutex */
+ mutex_unlock(&priv->aux_ts_lock);
+
+ /* Trigger the internal snapshot signal: create a rising edge by
+ * toggling GPO1 low and then back high.
+ */
+ gpio_value = readl(ioaddr + GMAC_GPIO_STATUS);
+ gpio_value &= ~GMAC_GPO1;
+ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
+ gpio_value |= GMAC_GPO1;
+ writel(gpio_value, ioaddr + GMAC_GPIO_STATUS);
+
+ /* Time sync done Indication - Interrupt method */
+ if (!wait_event_interruptible_timeout(priv->tstamp_busy_wait,
+ stmmac_cross_ts_isr(priv),
+ HZ / 100)) {
+ priv->plat->int_snapshot_en = 0;
+ return -ETIMEDOUT;
+ }
+
+ num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) &
+ GMAC_TIMESTAMP_ATSNS_MASK) >>
+ GMAC_TIMESTAMP_ATSNS_SHIFT;
+
+ /* Loop so the final timestamps come from the last FIFO segment */
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_ptptime(priv, ptpaddr, &ptp_time);
+ *device = ns_to_ktime(ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+ get_arttime(priv->mii, intel_priv->mdio_adhoc_addr, &art_time);
+ *system = convert_art_to_tsc(art_time);
+ }
+
+ system->cycles *= intel_priv->crossts_adj;
+ priv->plat->int_snapshot_en = 0;
+
+ return 0;
+}
+
+static void intel_mgbe_pse_crossts_adj(struct intel_priv_data *intel_priv,
+ int base)
+{
+ if (boot_cpu_has(X86_FEATURE_ART)) {
+ unsigned int art_freq;
+
+ /* On systems that support ART, the ART frequency can be read
+ * from the ECX register of CPUID leaf 0x15.
+ */
+ art_freq = cpuid_ecx(ART_CPUID_LEAF);
+ do_div(art_freq, base);
+ intel_priv->crossts_adj = art_freq;
+ }
+}
+
+static void common_default_data(struct plat_stmmacenet_data *plat)
+{
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->needs_reset = true;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+}
+
+static int intel_mgbe_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct fwnode_handle *fwnode;
+ char clk_name[20];
+ int ret;
+ int i;
+
+ plat->pdev = pdev;
+ plat->phy_addr = -1;
+ plat->clk_csr = 5;
+ plat->has_gmac = 0;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 0;
+ plat->tso_en = 1;
+ plat->sph_disable = 1;
+
+	/* Multiplying factor applied to the clk_eee_i clock period to
+	 * bring it closer to 100 ns. Program it so that
+	 * clk_eee_time_period * (MULT_FACT_100NS + 1) falls within
+	 * 80 ns to 120 ns.
+	 * Here the clk_eee frequency is 19.2 MHz, so clk_eee_time_period
+	 * is 52 ns: 52 ns * (1 + 1) = 104 ns, hence MULT_FACT_100NS = 1.
+ */
+ plat->mult_fact_100ns = 1;
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].chan = i;
+
+ /* Disable Priority config by default */
+ plat->rx_queues_cfg[i].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ }
+
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[i].use_prio = false;
+		/* TX Q0 defaults to TSO; the remaining TX queues use TBS */
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
+ }
+
+	/* FIFO size is 4096 bytes per TX/RX queue */
+ plat->tx_fifo_size = plat->tx_queues_to_use * 4096;
+ plat->rx_fifo_size = plat->rx_queues_to_use * 4096;
+
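+	/* Weighted round-robin TX scheduling with ascending per-queue
+	 * weights, so higher-numbered TX queues get proportionally more
+	 * bandwidth.
+	 */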
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ plat->tx_queues_cfg[0].weight = 0x09;
+ plat->tx_queues_cfg[1].weight = 0x0A;
+ plat->tx_queues_cfg[2].weight = 0x0B;
+ plat->tx_queues_cfg[3].weight = 0x0C;
+ plat->tx_queues_cfg[4].weight = 0x0D;
+ plat->tx_queues_cfg[5].weight = 0x0E;
+ plat->tx_queues_cfg[6].weight = 0x0F;
+ plat->tx_queues_cfg[7].weight = 0x10;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 0;
+ plat->dma_cfg->mixed_burst = 0;
+ plat->dma_cfg->aal = 0;
+ plat->dma_cfg->dche = true;
+
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi),
+ GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_lpi_en = 0;
+ plat->axi->axi_xit_frm = 0;
+ plat->axi->axi_wr_osr_lmt = 1;
+ plat->axi->axi_rd_osr_lmt = 1;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+
+ plat->ptp_max_adj = plat->clk_ptp_rate;
+ plat->eee_usecs_rate = plat->clk_ptp_rate;
+
+ /* Set system clock */
+ sprintf(clk_name, "%s-%s", "stmmac", pci_name(pdev));
+
+ plat->stmmac_clk = clk_register_fixed_rate(&pdev->dev,
+ clk_name, NULL, 0,
+ plat->clk_ptp_rate);
+
+ if (IS_ERR(plat->stmmac_clk)) {
+		dev_warn(&pdev->dev, "Failed to register stmmac-clk\n");
+ plat->stmmac_clk = NULL;
+ }
+
+ ret = clk_prepare_enable(plat->stmmac_clk);
+ if (ret) {
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ return ret;
+ }
+
+ plat->ptp_clk_freq_config = intel_mgbe_ptp_clk_freq_config;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ plat->vlan_fail_q_en = true;
+
+ /* Use the last Rx queue */
+ plat->vlan_fail_q = plat->rx_queues_to_use - 1;
+
+ /* For fixed-link setup, we allow phy-mode setting */
+ fwnode = dev_fwnode(&pdev->dev);
+ if (fwnode) {
+ int phy_mode;
+
+ /* "phy-mode" setting is optional. If it is set,
+ * we allow either sgmii or 1000base-x for now.
+ */
+ phy_mode = fwnode_get_phy_mode(fwnode);
+ if (phy_mode >= 0) {
+ if (phy_mode == PHY_INTERFACE_MODE_SGMII ||
+ phy_mode == PHY_INTERFACE_MODE_1000BASEX)
+ plat->phy_interface = phy_mode;
+ else
+ dev_warn(&pdev->dev, "Invalid phy-mode\n");
+ }
+ }
+
+	/* The Intel mGbE SGMII/1000BASE-X interface uses pcs-xpcs */
+ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII ||
+ plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) {
+ plat->mdio_bus_data->has_xpcs = true;
+ plat->mdio_bus_data->xpcs_an_inband = true;
+ }
+
+ /* For fixed-link setup, we clear xpcs_an_inband */
+ if (fwnode) {
+ struct fwnode_handle *fixed_node;
+
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node)
+ plat->mdio_bus_data->xpcs_an_inband = false;
+
+ fwnode_handle_put(fixed_node);
+ }
+
+ /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */
+ plat->mdio_bus_data->phy_mask = 1 << INTEL_MGBE_ADHOC_ADDR;
+ plat->mdio_bus_data->phy_mask |= 1 << INTEL_MGBE_XPCS_ADDR;
+
+ plat->int_snapshot_num = AUX_SNAPSHOT1;
+ plat->ext_snapshot_num = AUX_SNAPSHOT0;
+
+ plat->crosststamp = intel_crosststamp;
+ plat->int_snapshot_en = 0;
+
+ /* Setup MSI vector offset specific to Intel mGbE controller */
+ plat->msi_mac_vec = 29;
+ plat->msi_lpi_vec = 28;
+ plat->msi_sfty_ce_vec = 27;
+ plat->msi_sfty_ue_vec = 26;
+ plat->msi_rx_base_vec = 0;
+ plat->msi_tx_base_vec = 1;
+
+ return 0;
+}
+
+static int ehl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->rx_queues_to_use = 8;
+ plat->tx_queues_to_use = 8;
+ plat->use_phy_wol = 1;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 1;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int ehl_sgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+
+ plat->clk_ptp_rate = 204800000;
+
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_sgmii1g_info = {
+ .setup = ehl_sgmii_data,
+};
+
+static int ehl_rgmii_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII;
+
+ plat->clk_ptp_rate = 204800000;
+
+ return ehl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_rgmii1g_info = {
+ .setup = ehl_rgmii_data,
+};
+
+static int ehl_pse0_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ intel_priv->is_pse = true;
+ plat->bus_id = 2;
+ plat->host_dma_width = 32;
+
+ plat->clk_ptp_rate = 200000000;
+
+ intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
+
+ return ehl_common_data(pdev, plat);
+}
+
+static int ehl_pse0_rgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+ return ehl_pse0_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse0_rgmii1g_info = {
+ .setup = ehl_pse0_rgmii1g_data,
+};
+
+static int ehl_pse0_sgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return ehl_pse0_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse0_sgmii1g_info = {
+ .setup = ehl_pse0_sgmii1g_data,
+};
+
+static int ehl_pse1_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct intel_priv_data *intel_priv = plat->bsp_priv;
+
+ intel_priv->is_pse = true;
+ plat->bus_id = 3;
+ plat->host_dma_width = 32;
+
+ plat->clk_ptp_rate = 200000000;
+
+ intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ);
+
+ return ehl_common_data(pdev, plat);
+}
+
+static int ehl_pse1_rgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_RGMII_ID;
+ return ehl_pse1_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse1_rgmii1g_info = {
+ .setup = ehl_pse1_rgmii1g_data,
+};
+
+static int ehl_pse1_sgmii1g_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return ehl_pse1_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info ehl_pse1_sgmii1g_info = {
+ .setup = ehl_pse1_sgmii1g_data,
+};
+
+static int tgl_common_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->rx_queues_to_use = 6;
+ plat->tx_queues_to_use = 4;
+ plat->clk_ptp_rate = 204800000;
+ plat->speed_mode_2500 = intel_speed_mode_2500;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 0;
+ plat->safety_feat_cfg->prtyen = 0;
+ plat->safety_feat_cfg->tmouten = 0;
+
+ return intel_mgbe_common_data(pdev, plat);
+}
+
+static int tgl_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_phy0_info = {
+ .setup = tgl_sgmii_phy0_data,
+};
+
+static int tgl_sgmii_phy1_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 2;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+ plat->serdes_powerup = intel_serdes_powerup;
+ plat->serdes_powerdown = intel_serdes_powerdown;
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info tgl_sgmii1g_phy1_info = {
+ .setup = tgl_sgmii_phy1_data,
+};
+
+static int adls_sgmii_phy0_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 1;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ /* SerDes power up and power down are done in BIOS for ADL */
+
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adls_sgmii1g_phy0_info = {
+ .setup = adls_sgmii_phy0_data,
+};
+
+static int adls_sgmii_phy1_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ plat->bus_id = 2;
+ plat->phy_interface = PHY_INTERFACE_MODE_SGMII;
+
+ /* SerDes power up and power down are done in BIOS for ADL */
+
+ return tgl_common_data(pdev, plat);
+}
+
+static struct stmmac_pci_info adls_sgmii1g_phy1_info = {
+ .setup = adls_sgmii_phy1_data,
+};
+
+static const struct stmmac_pci_func_data galileo_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data galileo_stmmac_dmi_data = {
+ .func = galileo_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(galileo_stmmac_func_data),
+};
+
+static const struct stmmac_pci_func_data iot2040_stmmac_func_data[] = {
+ {
+ .func = 6,
+ .phy_addr = 1,
+ },
+ {
+ .func = 7,
+ .phy_addr = 1,
+ },
+};
+
+static const struct stmmac_pci_dmi_data iot2040_stmmac_dmi_data = {
+ .func = iot2040_stmmac_func_data,
+ .nfuncs = ARRAY_SIZE(iot2040_stmmac_func_data),
+};
+
+static const struct dmi_system_id quark_pci_dmi[] = {
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Galileo"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GalileoGen2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ /* There are 2 types of SIMATIC IOT2000: IOT2020 and IOT2040.
+	 * The asset tag "6ES7647-0AA00-0YA2" is only for the IOT2020, which
+	 * has a single PCI network device, while the other asset tags are
+	 * for the IOT2040, which has two.
+ */
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ DMI_EXACT_MATCH(DMI_BOARD_ASSET_TAG,
+ "6ES7647-0AA00-0YA2"),
+ },
+ .driver_data = (void *)&galileo_stmmac_dmi_data,
+ },
+ {
+ .matches = {
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "SIMATIC IOT2000"),
+ },
+ .driver_data = (void *)&iot2040_stmmac_dmi_data,
+ },
+ {}
+};
+
+static int quark_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int ret;
+
+ /* Set common default data first */
+ common_default_data(plat);
+
+	/* Refuse to load the driver and to register the net device if the
+	 * MAC controller does not connect to any PHY interface.
+ */
+ ret = stmmac_pci_find_phy_addr(pdev, quark_pci_dmi);
+ if (ret < 0) {
+ /* Return error to the caller on DMI enabled boards. */
+ if (dmi_get_system_info(DMI_BOARD_NAME))
+ return ret;
+
+		/* Galileo boards with old firmware don't support DMI. We always
+		 * use 1 as the PHY address here, so at least the first MAC
+		 * controller found gets probed.
+ */
+ ret = 1;
+ }
+
+ plat->bus_id = pci_dev_id(pdev);
+ plat->phy_addr = ret;
+ plat->phy_interface = PHY_INTERFACE_MODE_RMII;
+
+ plat->dma_cfg->pbl = 16;
+ plat->dma_cfg->pblx8 = true;
+ plat->dma_cfg->fixed_burst = 1;
+ /* AXI (TODO) */
+
+ return 0;
+}
+
+static const struct stmmac_pci_info quark_info = {
+ .setup = quark_default_data,
+};
+
+static int stmmac_config_single_msi(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "%s: Single IRQ enablement failed\n",
+ __func__);
+ return ret;
+ }
+
+ res->irq = pci_irq_vector(pdev, 0);
+ res->wol_irq = res->irq;
+ plat->multi_msi_en = 0;
+ dev_info(&pdev->dev, "%s: Single IRQ enablement successful\n",
+ __func__);
+
+ return 0;
+}
+
+static int stmmac_config_multi_msi(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct stmmac_resources *res)
+{
+ int ret;
+ int i;
+
+ if (plat->msi_rx_base_vec >= STMMAC_MSI_VEC_MAX ||
+ plat->msi_tx_base_vec >= STMMAC_MSI_VEC_MAX) {
+ dev_info(&pdev->dev, "%s: Invalid RX & TX vector defined\n",
+ __func__);
+		return -EINVAL;
+ }
+
+ ret = pci_alloc_irq_vectors(pdev, 2, STMMAC_MSI_VEC_MAX,
+ PCI_IRQ_MSI | PCI_IRQ_MSIX);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "%s: multi MSI enablement failed\n",
+ __func__);
+ return ret;
+ }
+
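+	/* RX and TX queue vectors are interleaved: RX queue i maps to
+	 * vector msi_rx_base_vec + 2 * i, TX queue i to
+	 * msi_tx_base_vec + 2 * i.
+	 */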
+ /* For RX MSI */
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ res->rx_irq[i] = pci_irq_vector(pdev,
+ plat->msi_rx_base_vec + i * 2);
+ }
+
+ /* For TX MSI */
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ res->tx_irq[i] = pci_irq_vector(pdev,
+ plat->msi_tx_base_vec + i * 2);
+ }
+
+ if (plat->msi_mac_vec < STMMAC_MSI_VEC_MAX)
+ res->irq = pci_irq_vector(pdev, plat->msi_mac_vec);
+ if (plat->msi_wol_vec < STMMAC_MSI_VEC_MAX)
+ res->wol_irq = pci_irq_vector(pdev, plat->msi_wol_vec);
+ if (plat->msi_lpi_vec < STMMAC_MSI_VEC_MAX)
+ res->lpi_irq = pci_irq_vector(pdev, plat->msi_lpi_vec);
+ if (plat->msi_sfty_ce_vec < STMMAC_MSI_VEC_MAX)
+ res->sfty_ce_irq = pci_irq_vector(pdev, plat->msi_sfty_ce_vec);
+ if (plat->msi_sfty_ue_vec < STMMAC_MSI_VEC_MAX)
+ res->sfty_ue_irq = pci_irq_vector(pdev, plat->msi_sfty_ue_vec);
+
+ plat->multi_msi_en = 1;
+ dev_info(&pdev->dev, "%s: multi MSI enablement successful\n", __func__);
+
+ return 0;
+}
+
+/**
+ * intel_eth_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to the table of device IDs.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. It is passed
+ * a "struct pci_dev *" for each device whose entry in the ID table matches
+ * the device. The probe function returns zero when the driver chooses to
+ * take "ownership" of the device, or a negative error code otherwise.
+ */
+static int intel_eth_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
+ struct intel_priv_data *intel_priv;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ int ret;
+
+ intel_priv = devm_kzalloc(&pdev->dev, sizeof(*intel_priv), GFP_KERNEL);
+ if (!intel_priv)
+ return -ENOMEM;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
+ GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
+ /* Enable pci device */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ plat->bsp_priv = intel_priv;
+ intel_priv->mdio_adhoc_addr = INTEL_MGBE_ADHOC_ADDR;
+ intel_priv->crossts_adj = 1;
+
+	/* Initialize all MSI vectors to invalid so that they can be set
+	 * according to the platform data settings below.
+	 * Note: an MSI vector takes a value from 0 up to 31 (STMMAC_MSI_VEC_MAX).
+ */
+ plat->msi_mac_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_wol_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_lpi_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_sfty_ce_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_sfty_ue_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_rx_base_vec = STMMAC_MSI_VEC_MAX;
+ plat->msi_tx_base_vec = STMMAC_MSI_VEC_MAX;
+
+ ret = info->setup(pdev, plat);
+ if (ret)
+ return ret;
+
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[0];
+
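+	/* Program the LPI 1us tick counter: the register takes the number
+	 * of clock cycles per microsecond, minus one.
+	 */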
+ if (plat->eee_usecs_rate > 0) {
+ u32 tx_lpi_usec;
+
+ tx_lpi_usec = (plat->eee_usecs_rate / 1000000) - 1;
+ writel(tx_lpi_usec, res.addr + GMAC_1US_TIC_COUNTER);
+ }
+
+ ret = stmmac_config_multi_msi(pdev, plat, &res);
+ if (ret) {
+ ret = stmmac_config_single_msi(pdev, plat, &res);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable IRQ\n",
+ __func__);
+ goto err_alloc_irq;
+ }
+ }
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+	ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+	if (ret)
+		goto err_alloc_irq;
+
+ return 0;
+
+err_alloc_irq:
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_unregister_fixed_rate(plat->stmmac_clk);
+ return ret;
+}
+
+/**
+ * intel_eth_pci_remove
+ *
+ * @pdev: pci device pointer
+ *
+ * Description: this function calls the main driver removal routine to free
+ * the net resources, then releases the PCI resources.
+ */
+static void intel_eth_pci_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_dvr_remove(&pdev->dev);
+
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_unregister_fixed_rate(priv->plat->stmmac_clk);
+}
+
+static int __maybe_unused intel_eth_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_wake_from_d3(pdev, true);
+ pci_set_power_state(pdev, PCI_D3hot);
+ return 0;
+}
+
+static int __maybe_unused intel_eth_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(intel_eth_pm_ops, intel_eth_pci_suspend,
+ intel_eth_pci_resume);
+
+#define PCI_DEVICE_ID_INTEL_QUARK 0x0937
+#define PCI_DEVICE_ID_INTEL_EHL_RGMII1G 0x4b30
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII1G 0x4b31
+#define PCI_DEVICE_ID_INTEL_EHL_SGMII2G5 0x4b32
+/* The Intel(R) Programmable Services Engine (Intel(R) PSE) consists of
+ * 2 MACs, named PSE0 and PSE1.
+ */
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_RGMII1G 0x4ba0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII1G 0x4ba1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE0_SGMII2G5 0x4ba2
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_RGMII1G 0x4bb0
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII1G 0x4bb1
+#define PCI_DEVICE_ID_INTEL_EHL_PSE1_SGMII2G5 0x4bb2
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_0 0x43ac
+#define PCI_DEVICE_ID_INTEL_TGLH_SGMII1G_1 0x43a2
+#define PCI_DEVICE_ID_INTEL_TGL_SGMII1G 0xa0ac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_0 0x7aac
+#define PCI_DEVICE_ID_INTEL_ADLS_SGMII1G_1 0x7aad
+#define PCI_DEVICE_ID_INTEL_ADLN_SGMII1G 0x54ac
+#define PCI_DEVICE_ID_INTEL_RPLP_SGMII1G 0x51ac
+
+static const struct pci_device_id intel_eth_pci_id_table[] = {
+ { PCI_DEVICE_DATA(INTEL, QUARK, &quark_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_RGMII1G, &ehl_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII1G, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_SGMII2G5, &ehl_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_RGMII1G, &ehl_pse0_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII1G, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE0_SGMII2G5, &ehl_pse0_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_RGMII1G, &ehl_pse1_rgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII1G, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, EHL_PSE1_SGMII2G5, &ehl_pse1_sgmii1g_info) },
+ { PCI_DEVICE_DATA(INTEL, TGL_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_0, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, TGLH_SGMII1G_1, &tgl_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_0, &adls_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLS_SGMII1G_1, &adls_sgmii1g_phy1_info) },
+ { PCI_DEVICE_DATA(INTEL, ADLN_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ { PCI_DEVICE_DATA(INTEL, RPLP_SGMII1G, &tgl_sgmii1g_phy0_info) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, intel_eth_pci_id_table);
+
+static struct pci_driver intel_eth_pci_driver = {
+ .name = "intel-eth-pci",
+ .id_table = intel_eth_pci_id_table,
+ .probe = intel_eth_pci_probe,
+ .remove = intel_eth_pci_remove,
+ .driver = {
+ .pm = &intel_eth_pm_ops,
+ },
+};
+
+module_pci_driver(intel_eth_pci_driver);
+
+MODULE_DESCRIPTION("INTEL 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Voon Weifeng <weifeng.voon@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
new file mode 100644
index 000000000..0a3798747
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2020, Intel Corporation
+ * DWMAC Intel header file
+ */
+
+#ifndef __DWMAC_INTEL_H__
+#define __DWMAC_INTEL_H__
+
+#define POLL_DELAY_US 8
+
+/* SERDES Register */
+#define SERDES_GCR	0x0	/* Global Configuration */
+#define SERDES_GSR0 0x5 /* Global Status Reg0 */
+#define SERDES_GCR0 0xb /* Global Configuration Reg0 */
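+/* These SerDes registers are not memory-mapped: the driver reaches them
+ * over MDIO at the Intel ad-hoc address (INTEL_MGBE_ADHOC_ADDR below).
+ */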
+
+/* SERDES defines */
+#define SERDES_PLL_CLK BIT(0) /* PLL clk valid signal */
+#define SERDES_PHY_RX_CLK BIT(1) /* PSE SGMII PHY rx clk */
+#define SERDES_RST BIT(2) /* Serdes Reset */
+#define SERDES_PWR_ST_MASK	GENMASK(6, 4)	/* Serdes Power state */
+#define SERDES_RATE_MASK GENMASK(9, 8)
+#define SERDES_PCLK_MASK GENMASK(14, 12) /* PCLK rate to PHY */
+#define SERDES_LINK_MODE_MASK GENMASK(2, 1)
+#define SERDES_LINK_MODE_SHIFT 1
+#define SERDES_PWR_ST_SHIFT 4
+#define SERDES_PWR_ST_P0 0x0
+#define SERDES_PWR_ST_P3 0x3
+#define SERDES_LINK_MODE_2G5 0x3
+#define SERSED_LINK_MODE_1G 0x2
+#define SERDES_PCLK_37p5MHZ 0x0
+#define SERDES_PCLK_70MHZ 0x1
+#define SERDES_RATE_PCIE_GEN1 0x0
+#define SERDES_RATE_PCIE_GEN2 0x1
+#define SERDES_RATE_PCIE_SHIFT 8
+#define SERDES_PCLK_SHIFT 12
+
+#define INTEL_MGBE_ADHOC_ADDR 0x15
+#define INTEL_MGBE_XPCS_ADDR 0x16
+
+/* Cross-timestamping defines */
+#define ART_CPUID_LEAF 0x15
+#define EHL_PSE_ART_MHZ 19200000
+
+/* PTP clock frequency selection for the PSE and PCH GbE controllers */
+#define PSE_PTP_CLK_FREQ_MASK (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PSE_PTP_CLK_FREQ_200MHZ (GMAC_GPO0 | GMAC_GPO3)
+#define PSE_PTP_CLK_FREQ_256MHZ (0)
+#define PCH_PTP_CLK_FREQ_MASK (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_19_2MHZ (GMAC_GPO0)
+#define PCH_PTP_CLK_FREQ_200MHZ (0)
+
+#endif /* __DWMAC_INTEL_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644
index 000000000..e888c8a9c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -0,0 +1,512 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+#include <linux/sys_soc.h>
+#include <linux/bitfield.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE 0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0 0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK 0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL 0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
+/* The mode is encoded in 1 bit, but its meaning depends on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000 1
+#define NSS_COMMON_CLK_DIV_RGMII_100 9
+#define NSS_COMMON_CLK_DIV_RGMII_10 99
+#define NSS_COMMON_CLK_DIV_SGMII_1000 0
+#define NSS_COMMON_CLK_DIV_SGMII_100 4
+#define NSS_COMMON_CLK_DIV_SGMII_10 49
+
+#define QSGMII_PCS_ALL_CH_CTL 0x80
+#define QSGMII_PCS_CH_SPEED_FORCE BIT(1)
+#define QSGMII_PCS_CH_SPEED_10 0x0
+#define QSGMII_PCS_CH_SPEED_100 BIT(2)
+#define QSGMII_PCS_CH_SPEED_1000 BIT(3)
+#define QSGMII_PCS_CH_SPEED_MASK (QSGMII_PCS_CH_SPEED_FORCE | \
+ QSGMII_PCS_CH_SPEED_10 | \
+ QSGMII_PCS_CH_SPEED_100 | \
+ QSGMII_PCS_CH_SPEED_1000)
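+/* Each channel's force-speed bits occupy a 4-bit field in ALL_CH_CTL,
+ * hence the per-channel shift of 4 * (channel id).
+ */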
+#define QSGMII_PCS_CH_SPEED_SHIFT(x) ((x) * 4)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
+
+/* Only GMAC1/2/3 support SGMII, and their CTL registers are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 0x134 : \
+ (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
+#define QSGMII_PHY_QSGMII_EN BIT(7)
+#define QSGMII_PHY_DEEMPHASIS_LVL_MASK GENMASK(11, 10)
+#define QSGMII_PHY_DEEMPHASIS_LVL(x) FIELD_PREP(QSGMII_PHY_DEEMPHASIS_LVL_MASK, (x))
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK GENMASK(14, 12)
+#define QSGMII_PHY_PHASE_LOOP_GAIN(x) FIELD_PREP(QSGMII_PHY_PHASE_LOOP_GAIN_MASK, (x))
+#define QSGMII_PHY_RX_DC_BIAS_MASK GENMASK(19, 18)
+#define QSGMII_PHY_RX_DC_BIAS(x) FIELD_PREP(QSGMII_PHY_RX_DC_BIAS_MASK, (x))
+#define QSGMII_PHY_RX_INPUT_EQU_MASK GENMASK(21, 20)
+#define QSGMII_PHY_RX_INPUT_EQU(x) FIELD_PREP(QSGMII_PHY_RX_INPUT_EQU_MASK, (x))
+#define QSGMII_PHY_CDR_PI_SLEW_MASK GENMASK(23, 22)
+#define QSGMII_PHY_CDR_PI_SLEW(x) FIELD_PREP(QSGMII_PHY_CDR_PI_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_SLEW_MASK GENMASK(27, 26)
+#define QSGMII_PHY_TX_SLEW(x) FIELD_PREP(QSGMII_PHY_TX_SLEW_MASK, (x))
+#define QSGMII_PHY_TX_DRV_AMP_MASK GENMASK(31, 28)
+#define QSGMII_PHY_TX_DRV_AMP(x) FIELD_PREP(QSGMII_PHY_TX_DRV_AMP_MASK, (x))
+
+struct ipq806x_gmac {
+ struct platform_device *pdev;
+ struct regmap *nss_common;
+ struct regmap *qsgmii_csr;
+ uint32_t id;
+ struct clk *core_clk;
+ phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_SGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_SGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_SGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_RGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_RGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_RGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ uint32_t clk_bits, val;
+ int div;
+
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ div = get_clk_div_rgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ div = get_clk_div_sgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+
+ default:
+ dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return -EINVAL;
+ }
+
+ /* Disable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val &= ~clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ /* Set the divider */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+ val &= ~(NSS_COMMON_CLK_DIV_MASK
+ << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+ val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+ /* Enable the clock back */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ return 0;
+}
+
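+/* Gather everything the glue layer needs from the device tree: the phy
+ * mode, the "qcom,id" MAC index (0..3), the "stmmaceth" core clock and
+ * the nss-common/qsgmii-csr syscon regmaps.
+ */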
+static int ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int ret;
+
+ ret = of_get_phy_mode(dev->of_node, &gmac->phy_mode);
+ if (ret) {
+ dev_err(dev, "missing phy mode property\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+ dev_err(dev, "missing qcom id property\n");
+ return -EINVAL;
+ }
+
+ /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+ * code and keep it consistent with the Linux convention, we'll number
+ * them from 0 to 3 here.
+ */
+ if (gmac->id > 3) {
+ dev_err(dev, "invalid gmac id\n");
+ return -EINVAL;
+ }
+
+ gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->core_clk)) {
+ dev_err(dev, "missing stmmaceth clk property\n");
+ return PTR_ERR(gmac->core_clk);
+ }
+ clk_set_rate(gmac->core_clk, 266000000);
+
+ /* Setup the register map for the nss common registers */
+ gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,nss-common");
+ if (IS_ERR(gmac->nss_common)) {
+ dev_err(dev, "missing nss-common node\n");
+ return PTR_ERR(gmac->nss_common);
+ }
+
+ /* Setup the register map for the qsgmii csr registers */
+ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,qsgmii-csr");
+ if (IS_ERR(gmac->qsgmii_csr))
+ dev_err(dev, "missing qsgmii-csr node\n");
+
+ return PTR_ERR_OR_ZERO(gmac->qsgmii_csr);
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static int
+ipq806x_gmac_configure_qsgmii_pcs_speed(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ struct device *dev = &pdev->dev;
+ struct device_node *dn;
+ int link_speed;
+ int val = 0;
+ int ret;
+
+	/* Some bootloaders may apply a wrong configuration and leave the
+	 * port non-functional. If a fixed link is not set, clear the
+	 * force-speed bit.
+	 */
+ if (!of_phy_is_fixed_link(pdev->dev.of_node))
+ goto write;
+
+ dn = of_get_child_by_name(pdev->dev.of_node, "fixed-link");
+ ret = of_property_read_u32(dn, "speed", &link_speed);
+ of_node_put(dn);
+ if (ret) {
+		dev_err(dev, "found fixed-link node with no speed\n");
+ return ret;
+ }
+
+ val = QSGMII_PCS_CH_SPEED_FORCE;
+
+ switch (link_speed) {
+ case SPEED_1000:
+ val |= QSGMII_PCS_CH_SPEED_1000;
+ break;
+ case SPEED_100:
+ val |= QSGMII_PCS_CH_SPEED_100;
+ break;
+ case SPEED_10:
+ val |= QSGMII_PCS_CH_SPEED_10;
+ break;
+ }
+
+write:
+ regmap_update_bits(gmac->qsgmii_csr, QSGMII_PCS_ALL_CH_CTL,
+ QSGMII_PCS_CH_SPEED_MASK <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id),
+ val <<
+ QSGMII_PCS_CH_SPEED_SHIFT(gmac->id));
+
+ return 0;
+}
+
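+/* Matches SoC revision 1.x, which needs different QSGMII TX driver
+ * tuning in ipq806x_gmac_configure_qsgmii_params() below.
+ */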
+static const struct soc_device_attribute ipq806x_gmac_soc_v1[] = {
+ {
+ .revision = "1.*",
+ },
+ {
+ /* sentinel */
+ }
+};
+
+static int
+ipq806x_gmac_configure_qsgmii_params(struct ipq806x_gmac *gmac)
+{
+ struct platform_device *pdev = gmac->pdev;
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ u32 qsgmii_param;
+
+ switch (gmac->id) {
+ case 1:
+ soc = soc_device_match(ipq806x_gmac_soc_v1);
+
+ if (soc)
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xc) |
+ QSGMII_PHY_TX_SLEW(0x2) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x2);
+ else
+ qsgmii_param = QSGMII_PHY_TX_DRV_AMP(0xd) |
+ QSGMII_PHY_TX_SLEW(0x0) |
+ QSGMII_PHY_DEEMPHASIS_LVL(0x0);
+
+ qsgmii_param |= QSGMII_PHY_RX_DC_BIAS(0x2);
+ break;
+ case 2:
+ case 3:
+ qsgmii_param = QSGMII_PHY_RX_DC_BIAS(0x3) |
+ QSGMII_PHY_TX_DRV_AMP(0xc);
+ break;
+ default: /* gmac 0 can't be set in SGMII mode */
+ dev_err(dev, "gmac id %d can't be in SGMII mode", gmac->id);
+ return -EINVAL;
+ }
+
+ /* Common params across all gmac id */
+ qsgmii_param |= QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ QSGMII_PHY_PHASE_LOOP_GAIN(0x4) |
+ QSGMII_PHY_RX_INPUT_EQU(0x1) |
+ QSGMII_PHY_CDR_PI_SLEW(0x2);
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ qsgmii_param);
+
+ return 0;
+}
+
+static int ipq806x_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ struct ipq806x_gmac *gmac;
+ int val;
+ int err;
+
+ val = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (val)
+ return val;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac) {
+ err = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ gmac->pdev = pdev;
+
+ err = ipq806x_gmac_of_parse(gmac);
+ if (err) {
+ dev_err(dev, "device tree parsing error\n");
+ goto err_remove_config_dt;
+ }
+
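+	/* Reset the QSGMII PCS calibration lock-detect logic before
+	 * reconfiguring the interface.
+	 */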
+ regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+ QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+ /* Inter frame gap is set to 12 */
+ val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+ 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+ /* We also initiate an AXI low power exit request */
+ val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ default:
+ goto err_unsupported_phy;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+ /* Configure the clock src according to the mode */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+ val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ default:
+ goto err_unsupported_phy;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+ /* Enable PTP clock */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+ default:
+ goto err_unsupported_phy;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ err = ipq806x_gmac_configure_qsgmii_params(gmac);
+ if (err)
+ goto err_remove_config_dt;
+
+ err = ipq806x_gmac_configure_qsgmii_pcs_speed(gmac);
+ if (err)
+ goto err_remove_config_dt;
+ }
+
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+ plat_dat->multicast_filter_bins = 0;
+ plat_dat->tx_fifo_size = 8192;
+ plat_dat->rx_fifo_size = 8192;
+
+ err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (err)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_unsupported_phy:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ err = -EINVAL;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return err;
+}
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+ { .compatible = "qcom,ipq806x-gmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+ .probe = ipq806x_gmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ipq806x-gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = ipq806x_gmac_dwmac_match,
+ },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
new file mode 100644
index 000000000..e129ee102
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2020, Loongson Corporation
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+#include <linux/device.h>
+#include <linux/of_irq.h>
+#include "stmmac.h"
+
+static int loongson_default_data(struct plat_stmmacenet_data *plat)
+{
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+
+ plat->multicast_filter_bins = 256;
+ return 0;
+}
+
+static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ struct device_node *np;
+ int ret, i, phy_mode;
+
+ np = dev_of_node(&pdev->dev);
+
+ if (!np) {
+		dev_err(&pdev->dev, "No OF node\n");
+ return -ENODEV;
+ }
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ if (plat->mdio_node) {
+ dev_info(&pdev->dev, "Found MDIO subnode\n");
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL);
+ if (!plat->dma_cfg) {
+ ret = -ENOMEM;
+ goto err_put_node;
+ }
+
+ /* Enable pci device */
+ ret = pci_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__);
+ goto err_put_node;
+ }
+
+	/* Map the first non-empty BAR and use it as the device base address */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev));
+ if (ret)
+ goto err_disable_device;
+ break;
+ }
+
+ plat->bus_id = of_alias_get_id(np, "ethernet");
+ if (plat->bus_id < 0)
+ plat->bus_id = pci_dev_id(pdev);
+
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0) {
+ dev_err(&pdev->dev, "phy_mode not found\n");
+ ret = phy_mode;
+ goto err_disable_device;
+ }
+
+ plat->phy_interface = phy_mode;
+ plat->interface = PHY_INTERFACE_MODE_GMII;
+
+ pci_set_master(pdev);
+
+ loongson_default_data(plat);
+ pci_enable_msi(pdev);
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[0];
+
+ res.irq = of_irq_get_byname(np, "macirq");
+ if (res.irq < 0) {
+ dev_err(&pdev->dev, "IRQ macirq not found\n");
+ ret = -ENODEV;
+ goto err_disable_msi;
+ }
+
+ res.wol_irq = of_irq_get_byname(np, "eth_wake_irq");
+ if (res.wol_irq < 0) {
+ dev_info(&pdev->dev, "IRQ eth_wake_irq not found, using macirq\n");
+ res.wol_irq = res.irq;
+ }
+
+ res.lpi_irq = of_irq_get_byname(np, "eth_lpi");
+ if (res.lpi_irq < 0) {
+ dev_err(&pdev->dev, "IRQ eth_lpi not found\n");
+ ret = -ENODEV;
+ goto err_disable_msi;
+ }
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat, &res);
+ if (ret)
+ goto err_disable_msi;
+
+ return ret;
+
+err_disable_msi:
+ pci_disable_msi(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+err_put_node:
+ of_node_put(plat->mdio_node);
+ return ret;
+}
+
+static void loongson_dwmac_remove(struct pci_dev *pdev)
+{
+ struct net_device *ndev = dev_get_drvdata(&pdev->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int i;
+
+ of_node_put(priv->plat->mdio_node);
+ stmmac_dvr_remove(&pdev->dev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+
+ pci_disable_msi(pdev);
+ pci_disable_device(pdev);
+}
+
+static int __maybe_unused loongson_dwmac_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int __maybe_unused loongson_dwmac_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(loongson_dwmac_pm_ops, loongson_dwmac_suspend,
+ loongson_dwmac_resume);
+
+static const struct pci_device_id loongson_dwmac_id_table[] = {
+ { PCI_VDEVICE(LOONGSON, 0x7a03) },
+ {}
+};
+MODULE_DEVICE_TABLE(pci, loongson_dwmac_id_table);
+
+static struct pci_driver loongson_dwmac_driver = {
+ .name = "dwmac-loongson-pci",
+ .id_table = loongson_dwmac_id_table,
+ .probe = loongson_dwmac_probe,
+ .remove = loongson_dwmac_remove,
+ .driver = {
+ .pm = &loongson_dwmac_pm_ops,
+ },
+};
+
+module_pci_driver(loongson_dwmac_driver);
+
+MODULE_DESCRIPTION("Loongson DWMAC PCI driver");
+MODULE_AUTHOR("Qing Zhang <zhangqing@loongson.cn>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
new file mode 100644
index 000000000..9d77c647b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-lpc18xx.c
@@ -0,0 +1,97 @@
+/*
+ * DWMAC glue for NXP LPC18xx/LPC43xx Ethernet
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* Register defines for CREG syscon */
+#define LPC18XX_CREG_CREG6 0x12c
+# define LPC18XX_CREG_CREG6_ETHMODE_MASK 0x7
+# define LPC18XX_CREG_CREG6_ETHMODE_MII 0x0
+# define LPC18XX_CREG_CREG6_ETHMODE_RMII 0x4
+
+static int lpc18xx_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct regmap *reg;
+ u8 ethmode;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ plat_dat->has_gmac = true;
+
+ reg = syscon_regmap_lookup_by_compatible("nxp,lpc1850-creg");
+ if (IS_ERR(reg)) {
+ dev_err(&pdev->dev, "syscon lookup failed\n");
+ ret = PTR_ERR(reg);
+ goto err_remove_config_dt;
+ }
+
+ if (plat_dat->interface == PHY_INTERFACE_MODE_MII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_MII;
+ } else if (plat_dat->interface == PHY_INTERFACE_MODE_RMII) {
+ ethmode = LPC18XX_CREG_CREG6_ETHMODE_RMII;
+ } else {
+ dev_err(&pdev->dev, "Only MII and RMII mode supported\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ regmap_update_bits(reg, LPC18XX_CREG_CREG6,
+ LPC18XX_CREG_CREG6_ETHMODE_MASK, ethmode);
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static const struct of_device_id lpc18xx_dwmac_match[] = {
+ { .compatible = "nxp,lpc1850-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dwmac_match);
+
+static struct platform_driver lpc18xx_dwmac_driver = {
+ .probe = lpc18xx_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "lpc18xx-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = lpc18xx_dwmac_match,
+ },
+};
+module_platform_driver(lpc18xx_dwmac_driver);
+
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_DESCRIPTION("DWMAC glue for LPC18xx/43xx Ethernet");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
new file mode 100644
index 000000000..9ae31e3dc
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
@@ -0,0 +1,715 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ */
+#include <linux/bitfield.h>
+#include <linux/io.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* Peri Configuration register for mt2712 */
+#define PERI_ETH_PHY_INTF_SEL 0x418
+#define PHY_INTF_MII 0
+#define PHY_INTF_RGMII 1
+#define PHY_INTF_RMII 4
+#define RMII_CLK_SRC_RXC BIT(4)
+#define RMII_CLK_SRC_INTERNAL BIT(5)
+
+#define PERI_ETH_DLY 0x428
+#define ETH_DLY_GTXC_INV BIT(6)
+#define ETH_DLY_GTXC_ENABLE BIT(5)
+#define ETH_DLY_GTXC_STAGES GENMASK(4, 0)
+#define ETH_DLY_TXC_INV BIT(20)
+#define ETH_DLY_TXC_ENABLE BIT(19)
+#define ETH_DLY_TXC_STAGES GENMASK(18, 14)
+#define ETH_DLY_RXC_INV BIT(13)
+#define ETH_DLY_RXC_ENABLE BIT(12)
+#define ETH_DLY_RXC_STAGES GENMASK(11, 7)
+
+#define PERI_ETH_DLY_FINE 0x800
+#define ETH_RMII_DLY_TX_INV BIT(2)
+#define ETH_FINE_DLY_GTXC BIT(1)
+#define ETH_FINE_DLY_RXC BIT(0)
+
+/* Peri Configuration register for mt8195 */
+#define MT8195_PERI_ETH_CTRL0 0xFD0
+#define MT8195_RMII_CLK_SRC_INTERNAL BIT(28)
+#define MT8195_RMII_CLK_SRC_RXC BIT(27)
+#define MT8195_ETH_INTF_SEL GENMASK(26, 24)
+#define MT8195_RGMII_TXC_PHASE_CTRL BIT(22)
+#define MT8195_EXT_PHY_MODE BIT(21)
+#define MT8195_DLY_GTXC_INV BIT(12)
+#define MT8195_DLY_GTXC_ENABLE BIT(5)
+#define MT8195_DLY_GTXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL1 0xFD4
+#define MT8195_DLY_RXC_INV BIT(25)
+#define MT8195_DLY_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_TXC_INV BIT(12)
+#define MT8195_DLY_TXC_ENABLE BIT(5)
+#define MT8195_DLY_TXC_STAGES GENMASK(4, 0)
+
+#define MT8195_PERI_ETH_CTRL2 0xFD8
+#define MT8195_DLY_RMII_RXC_INV BIT(25)
+#define MT8195_DLY_RMII_RXC_ENABLE BIT(18)
+#define MT8195_DLY_RMII_RXC_STAGES GENMASK(17, 13)
+#define MT8195_DLY_RMII_TXC_INV BIT(12)
+#define MT8195_DLY_RMII_TXC_ENABLE BIT(5)
+#define MT8195_DLY_RMII_TXC_STAGES GENMASK(4, 0)
+
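+/* Per-direction clock delay (given in ps and converted to delay-line
+ * stages before programming) plus optional clock inversion.
+ */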
+struct mac_delay_struct {
+ u32 tx_delay;
+ u32 rx_delay;
+ bool tx_inv;
+ bool rx_inv;
+};
+
+struct mediatek_dwmac_plat_data {
+ const struct mediatek_dwmac_variant *variant;
+ struct mac_delay_struct mac_delay;
+ struct clk *rmii_internal_clk;
+ struct clk_bulk_data *clks;
+ struct regmap *peri_regmap;
+ struct device_node *np;
+ struct device *dev;
+ phy_interface_t phy_mode;
+ bool rmii_clk_from_mac;
+ bool rmii_rxc;
+ bool mac_wol;
+};
+
+struct mediatek_dwmac_variant {
+ int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat);
+ int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat);
+
+ /* clock ids to be requested */
+ const char * const *clk_list;
+ int num_clks;
+
+ u32 dma_bit_mask;
+ u32 rx_delay_max;
+ u32 tx_delay_max;
+};
+
+/* list of clocks required for mac */
+static const char * const mt2712_dwmac_clk_l[] = {
+ "axi", "apb", "mac_main", "ptp_ref"
+};
+
+static const char * const mt8195_dwmac_clk_l[] = {
+ "axi", "apb", "mac_cg", "mac_main", "ptp_ref"
+};
+
+static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? RMII_CLK_SRC_INTERNAL : 0;
+ int rmii_rxc = plat->rmii_rxc ? RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= PHY_INTF_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (PHY_INTF_RMII | rmii_rxc | rmii_clk_from_mac);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= PHY_INTF_RGMII;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val);
+
+ return 0;
+}
+
+static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay /= 550;
+ mac_delay->rx_delay /= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay /= 170;
+ mac_delay->rx_delay /= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
+static void mt2712_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_RMII:
+ /* 550ps per stage for MII/RMII */
+ mac_delay->tx_delay *= 550;
+ mac_delay->rx_delay *= 550;
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ /* 170ps per stage for RGMII */
+ mac_delay->tx_delay *= 170;
+ mac_delay->rx_delay *= 170;
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ break;
+ }
+}
+
+static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 delay_val = 0, fine_val = 0;
+
+ mt2712_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (plat->rmii_clk_from_mac) {
+			/* case 1: the mac provides the rmii reference clock,
+			 * and the clock is output on the TXC pin.
+			 * The egress timing can be adjusted by the GTXC delay macro circuit.
+			 * The ingress timing can be adjusted by the TXC delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+ } else {
+			/* case 2: the rmii reference clock comes from the external
+			 * phy, and the property "rmii_rxc" indicates which pin
+			 * (TXC/RXC) the reference clock is connected to. The
+			 * reference clock is a received signal, so rx_delay/rx_inv
+			 * describe the reference clock timing adjustment.
+ */
+ if (plat->rmii_rxc) {
+				/* the external rmii reference clock is connected
+				 * to the RXC pin; it will be adjusted by the RXC
+				 * delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ } else {
+				/* the external rmii reference clock is connected
+				 * to the TXC pin; it will be adjusted by the TXC
+				 * delay macro circuit.
+ */
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv);
+ }
+			/* tx_inv inverts the tx clock inside the mac relative
+			 * to the reference clock from the external phy; this bit
+			 * is located in the same register as the fine-tune bits.
+			 */
+ if (mac_delay->tx_inv)
+ fine_val = ETH_RMII_DLY_TX_INV;
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ fine_val = ETH_FINE_DLY_GTXC | ETH_FINE_DLY_RXC;
+
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+ regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val);
+ regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val);
+
+ mt2712_delay_stage2ps(plat);
+
+ return 0;
+}
+
+static const struct mediatek_dwmac_variant mt2712_gmac_variant = {
+ .dwmac_set_phy_interface = mt2712_set_interface,
+ .dwmac_set_delay = mt2712_set_delay,
+ .clk_list = mt2712_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt2712_dwmac_clk_l),
+ .dma_bit_mask = 33,
+ .rx_delay_max = 17600,
+ .tx_delay_max = 17600,
+};
+
+static int mt8195_set_interface(struct mediatek_dwmac_plat_data *plat)
+{
+ int rmii_clk_from_mac = plat->rmii_clk_from_mac ? MT8195_RMII_CLK_SRC_INTERNAL : 0;
+ int rmii_rxc = plat->rmii_rxc ? MT8195_RMII_CLK_SRC_RXC : 0;
+ u32 intf_val = 0;
+
+ /* select phy interface in top control domain */
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_MII);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ intf_val |= (rmii_rxc | rmii_clk_from_mac);
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RMII);
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ intf_val |= FIELD_PREP(MT8195_ETH_INTF_SEL, PHY_INTF_RGMII);
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+	/* MT8195 supports only an external PHY */
+ intf_val |= MT8195_EXT_PHY_MODE;
+
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL0, intf_val);
+
+ return 0;
+}
+
+static void mt8195_delay_ps2stage(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay /= 290;
+ mac_delay->rx_delay /= 290;
+}
+
+static void mt8195_delay_stage2ps(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+
+ /* 290ps per stage */
+ mac_delay->tx_delay *= 290;
+ mac_delay->rx_delay *= 290;
+}
+
+static int mt8195_set_delay(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 gtxc_delay_val = 0, delay_val = 0, rmii_delay_val = 0;
+
+ mt8195_delay_ps2stage(plat);
+
+ switch (plat->phy_mode) {
+ case PHY_INTERFACE_MODE_MII:
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE, !!mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES, mac_delay->tx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (plat->rmii_clk_from_mac) {
+			/* case 1: the MAC provides the RMII reference clock,
+			 * which is output on the TXC pin.
+			 * The egress timing can be adjusted by the RMII_TXC delay macro circuit.
+			 * The ingress timing can be adjusted by the RMII_RXC delay macro circuit.
+			 */
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_ENABLE,
+ !!mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_STAGES,
+ mac_delay->tx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_TXC_INV,
+ mac_delay->tx_inv);
+
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_STAGES,
+ mac_delay->rx_delay);
+ rmii_delay_val |= FIELD_PREP(MT8195_DLY_RMII_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+			/* case 2: the rmii reference clock comes from the external
+			 * phy, and the property "rmii_rxc" indicates which pin
+			 * (TXC/RXC) the reference clock is connected to. The reference
+			 * clock is a received signal, so rx_delay/rx_inv are used to
+			 * describe the reference clock timing adjustment.
+			 */
+ if (plat->rmii_rxc) {
+				/* the external rmii reference clock is connected
+				 * to the RXC pin, so the reference clock is
+				 * adjusted by the RXC delay macro circuit.
+				 */
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV,
+ mac_delay->rx_inv);
+ } else {
+				/* the external rmii reference clock is connected
+				 * to the TXC pin, so the reference clock is
+				 * adjusted by the TXC delay macro circuit.
+				 */
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_ENABLE,
+ !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_STAGES,
+ mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_TXC_INV,
+ mac_delay->rx_inv);
+ }
+ }
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_ENABLE, !!mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_STAGES, mac_delay->tx_delay);
+ gtxc_delay_val |= FIELD_PREP(MT8195_DLY_GTXC_INV, mac_delay->tx_inv);
+
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_ENABLE, !!mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_STAGES, mac_delay->rx_delay);
+ delay_val |= FIELD_PREP(MT8195_DLY_RXC_INV, mac_delay->rx_inv);
+
+ break;
+ default:
+ dev_err(plat->dev, "phy interface not supported\n");
+ return -EINVAL;
+ }
+
+ regmap_update_bits(plat->peri_regmap,
+ MT8195_PERI_ETH_CTRL0,
+ MT8195_RGMII_TXC_PHASE_CTRL |
+ MT8195_DLY_GTXC_INV |
+ MT8195_DLY_GTXC_ENABLE |
+ MT8195_DLY_GTXC_STAGES,
+ gtxc_delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL1, delay_val);
+ regmap_write(plat->peri_regmap, MT8195_PERI_ETH_CTRL2, rmii_delay_val);
+
+ mt8195_delay_stage2ps(plat);
+
+ return 0;
+}
+
+static const struct mediatek_dwmac_variant mt8195_gmac_variant = {
+ .dwmac_set_phy_interface = mt8195_set_interface,
+ .dwmac_set_delay = mt8195_set_delay,
+ .clk_list = mt8195_dwmac_clk_l,
+ .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l),
+ .dma_bit_mask = 35,
+ .rx_delay_max = 9280,
+ .tx_delay_max = 9280,
+};
+
+static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat)
+{
+ struct mac_delay_struct *mac_delay = &plat->mac_delay;
+ u32 tx_delay_ps, rx_delay_ps;
+ int err;
+
+ plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg");
+ if (IS_ERR(plat->peri_regmap)) {
+ dev_err(plat->dev, "Failed to get pericfg syscon\n");
+ return PTR_ERR(plat->peri_regmap);
+ }
+
+ err = of_get_phy_mode(plat->np, &plat->phy_mode);
+ if (err) {
+ dev_err(plat->dev, "not find phy-mode\n");
+ return err;
+ }
+
+ if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) {
+ if (tx_delay_ps < plat->variant->tx_delay_max) {
+ mac_delay->tx_delay = tx_delay_ps;
+ } else {
+ dev_err(plat->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(plat->np, "mediatek,rx-delay-ps", &rx_delay_ps)) {
+ if (rx_delay_ps < plat->variant->rx_delay_max) {
+ mac_delay->rx_delay = rx_delay_ps;
+ } else {
+ dev_err(plat->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps);
+ return -EINVAL;
+ }
+ }
+
+ mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse");
+ mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse");
+ plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc");
+ plat->rmii_clk_from_mac = of_property_read_bool(plat->np, "mediatek,rmii-clk-from-mac");
+ plat->mac_wol = of_property_read_bool(plat->np, "mediatek,mac-wol");
+
+ return 0;
+}
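+
+/* An illustrative device tree fragment consuming the properties parsed
+ * above; the node name, unit address and pericfg label are made up and
+ * not taken from any real board file:
+ *
+ *	eth: ethernet@1101c000 {
+ *		compatible = "mediatek,mt2712-gmac";
+ *		phy-mode = "rgmii-rxid";
+ *		mediatek,pericfg = <&pericfg>;
+ *		mediatek,tx-delay-ps = <1530>;
+ *		mediatek,rx-delay-ps = <1530>;
+ *		mediatek,rxc-inverse;
+ *	};
+ */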
+
+static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat)
+{
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int i, ret;
+
+ plat->clks = devm_kcalloc(plat->dev, variant->num_clks, sizeof(*plat->clks), GFP_KERNEL);
+ if (!plat->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < variant->num_clks; i++)
+ plat->clks[i].id = variant->clk_list[i];
+
+ ret = devm_clk_bulk_get(plat->dev, variant->num_clks, plat->clks);
+ if (ret)
+ return ret;
+
+	/* The clock labeled "rmii_internal" is needed only in RMII mode
+	 * when the MAC provides the reference clock, and is useless for
+	 * RGMII/MII or for RMII when the PHY provides the reference clock.
+	 * So the "rmii_internal" clock is requested and configured only
+	 * when the RMII reference clock comes from the MAC.
+	 */
+ if (plat->rmii_clk_from_mac) {
+ plat->rmii_internal_clk = devm_clk_get(plat->dev, "rmii_internal");
+ if (IS_ERR(plat->rmii_internal_clk))
+ ret = PTR_ERR(plat->rmii_internal_clk);
+ } else {
+ plat->rmii_internal_clk = NULL;
+ }
+
+ return ret;
+}
+
+static int mediatek_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret;
+
+ if (variant->dwmac_set_phy_interface) {
+ ret = variant->dwmac_set_phy_interface(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (variant->dwmac_set_delay) {
+ ret = variant->dwmac_set_delay(plat);
+ if (ret) {
+ dev_err(plat->dev, "failed to set delay value, err = %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int mediatek_dwmac_clks_config(void *priv, bool enabled)
+{
+ struct mediatek_dwmac_plat_data *plat = priv;
+ const struct mediatek_dwmac_variant *variant = plat->variant;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable clks, err = %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(plat->rmii_internal_clk);
+ if (ret) {
+ dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret);
+ return ret;
+ }
+ } else {
+ clk_disable_unprepare(plat->rmii_internal_clk);
+ clk_bulk_disable_unprepare(variant->num_clks, plat->clks);
+ }
+
+ return ret;
+}
+
+static int mediatek_dwmac_common_data(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ struct mediatek_dwmac_plat_data *priv_plat)
+{
+ int i;
+
+ plat->interface = priv_plat->phy_mode;
+ plat->use_phy_wol = priv_plat->mac_wol ? 0 : 1;
+ plat->riwt_off = 1;
+ plat->maxmtu = ETH_DATA_LEN;
+ plat->host_dma_width = priv_plat->variant->dma_bit_mask;
+ plat->bsp_priv = priv_plat;
+ plat->init = mediatek_dwmac_init;
+ plat->clks_config = mediatek_dwmac_clks_config;
+
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 0;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 0;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
+	/* Default TX Q0 to use TSO and the rest of the TX queues for TBS */
+	for (i = 1; i < plat->tx_queues_to_use; i++)
+		plat->tx_queues_cfg[i].tbs_en = 1;
+
+ return 0;
+}
+
+static int mediatek_dwmac_probe(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ int ret;
+
+ priv_plat = devm_kzalloc(&pdev->dev, sizeof(*priv_plat), GFP_KERNEL);
+ if (!priv_plat)
+ return -ENOMEM;
+
+ priv_plat->variant = of_device_get_match_data(&pdev->dev);
+ if (!priv_plat->variant) {
+ dev_err(&pdev->dev, "Missing dwmac-mediatek variant\n");
+ return -EINVAL;
+ }
+
+ priv_plat->dev = &pdev->dev;
+ priv_plat->np = pdev->dev.of_node;
+
+ ret = mediatek_dwmac_config_dt(priv_plat);
+ if (ret)
+ return ret;
+
+ ret = mediatek_dwmac_clk_init(priv_plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+	ret = mediatek_dwmac_common_data(pdev, plat_dat, priv_plat);
+	if (ret)
+		goto err_remove_config_dt;
+
+	ret = mediatek_dwmac_init(pdev, priv_plat);
+	if (ret)
+		goto err_remove_config_dt;
+
+	ret = mediatek_dwmac_clks_config(priv_plat, true);
+	if (ret)
+		goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_drv_probe;
+
+ return 0;
+
+err_drv_probe:
+ mediatek_dwmac_clks_config(priv_plat, false);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int mediatek_dwmac_remove(struct platform_device *pdev)
+{
+ struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev);
+ int ret;
+
+ ret = stmmac_pltfr_remove(pdev);
+ mediatek_dwmac_clks_config(priv_plat, false);
+
+ return ret;
+}
+
+static const struct of_device_id mediatek_dwmac_match[] = {
+ { .compatible = "mediatek,mt2712-gmac",
+ .data = &mt2712_gmac_variant },
+ { .compatible = "mediatek,mt8195-gmac",
+ .data = &mt8195_gmac_variant },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
+
+static struct platform_driver mediatek_dwmac_driver = {
+ .probe = mediatek_dwmac_probe,
+ .remove = mediatek_dwmac_remove,
+ .driver = {
+ .name = "dwmac-mediatek",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = mediatek_dwmac_match,
+ },
+};
+module_platform_driver(mediatek_dwmac_driver);
+
+MODULE_AUTHOR("Biao Huang <biao.huang@mediatek.com>");
+MODULE_DESCRIPTION("MediaTek DWMAC specific glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
new file mode 100644
index 000000000..16fb66a0c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c
@@ -0,0 +1,105 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Meson6 and Meson8 DWMAC glue layer
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define ETHMAC_SPEED_100 BIT(1)
+
+struct meson_dwmac {
+ struct device *dev;
+ void __iomem *reg;
+};
+
+static void meson6_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct meson_dwmac *dwmac = priv;
+ unsigned int val;
+
+ val = readl(dwmac->reg);
+
+ switch (speed) {
+ case SPEED_10:
+ val &= ~ETHMAC_SPEED_100;
+ break;
+ case SPEED_100:
+ val |= ETHMAC_SPEED_100;
+ break;
+ }
+
+ writel(val, dwmac->reg);
+}
+
+static int meson6_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct meson_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->reg = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dwmac->reg)) {
+ ret = PTR_ERR(dwmac->reg);
+ goto err_remove_config_dt;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = meson6_dwmac_fix_mac_speed;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static const struct of_device_id meson6_dwmac_match[] = {
+ { .compatible = "amlogic,meson6-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson6_dwmac_match);
+
+static struct platform_driver meson6_dwmac_driver = {
+ .probe = meson6_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "meson6-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson6_dwmac_match,
+ },
+};
+module_platform_driver(meson6_dwmac_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson6 and Meson8 DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
new file mode 100644
index 000000000..e8b507f88
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Amlogic Meson8b, Meson8m2 and GXBB DWMAC glue layer
+ *
+ * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define PRG_ETH0 0x0
+
+#define PRG_ETH0_RGMII_MODE BIT(0)
+
+#define PRG_ETH0_EXT_PHY_MODE_MASK GENMASK(2, 0)
+#define PRG_ETH0_EXT_RGMII_MODE 1
+#define PRG_ETH0_EXT_RMII_MODE 4
+
+/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */
+#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4)
+
+/* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where 8ns is exactly one
+ * cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
+#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
+
+/* divider for the result of m250_sel */
+#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
+#define PRG_ETH0_CLK_M250_DIV_WIDTH 3
+
+#define PRG_ETH0_RGMII_TX_CLK_EN 10
+
+#define PRG_ETH0_INVERTED_RMII_CLK BIT(11)
+#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12)
+
+/* Bypass (= 0, the signal from the GPIO input directly connects to the
+ * internal sampling) or enable (= 1) the internal logic for RXEN and RXD[3:0]
+ * timing tuning.
+ */
+#define PRG_ETH0_ADJ_ENABLE BIT(13)
+/* Controls whether the RXEN and RXD[3:0] signals should be aligned with the
+ * input RX rising/falling edge and sent to the Ethernet internals. This sets
+ * the delay and skew automatically (internally).
+ */
+#define PRG_ETH0_ADJ_SETUP BIT(14)
+/* An internal counter based on the "timing-adjustment" clock. The counter is
+ * cleared on both the rising and falling edges of RX_CLK. This selects the
+ * delay (= the counter value) when to start sampling RXEN and RXD[3:0].
+ */
+#define PRG_ETH0_ADJ_DELAY GENMASK(19, 15)
+/* Adjusts the skew between each bit of RXEN and RXD[3:0]. If a signal has a
+ * large input delay, the bit for that signal (RXEN = bit 0, RXD[3] = bit 1,
+ * ...) can be configured to be 1 to compensate for a delay of about 1ns.
+ */
+#define PRG_ETH0_ADJ_SKEW GENMASK(24, 20)
+
+#define PRG_ETH1 0x4
+
+/* Adds a delay to the input RX_CLK for better timing; each step is 200ps.
+ * These bits are used with external RGMII PHYs because the RGMII RX
+ * data-valid window is small. cfg_rxclk_dly adjusts the window between
+ * RX_CLK and RX_DATA to improve the stability of "rx data valid".
+ */
+#define PRG_ETH1_CFG_RXCLK_DLY GENMASK(19, 16)
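+
+/* For example (value assumed for illustration): a DT property
+ * rx-internal-delay-ps = <1600> yields 1600 / 200 = 8, so
+ * FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY, 8) = 8 << 16 is what
+ * meson8b_init_rgmii_delays() below ends up writing to PRG_ETH1.
+ */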
+
+struct meson8b_dwmac;
+
+struct meson8b_dwmac_data {
+ int (*set_phy_mode)(struct meson8b_dwmac *dwmac);
+ bool has_prg_eth1_rgmii_rx_delay;
+};
+
+struct meson8b_dwmac {
+ struct device *dev;
+ void __iomem *regs;
+
+ const struct meson8b_dwmac_data *data;
+ phy_interface_t phy_mode;
+ struct clk *rgmii_tx_clk;
+ u32 tx_delay_ns;
+ u32 rx_delay_ps;
+ struct clk *timing_adj_clk;
+};
+
+struct meson8b_dwmac_clk_configs {
+ struct clk_mux m250_mux;
+ struct clk_divider m250_div;
+ struct clk_fixed_factor fixed_div2;
+ struct clk_gate rgmii_tx_en;
+};
+
+static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
+ u32 mask, u32 value)
+{
+ u32 data;
+
+ data = readl(dwmac->regs + reg);
+ data &= ~mask;
+ data |= (value & mask);
+
+ writel(data, dwmac->regs + reg);
+}
+
+static struct clk *meson8b_dwmac_register_clk(struct meson8b_dwmac *dwmac,
+ const char *name_suffix,
+ const struct clk_parent_data *parents,
+ int num_parents,
+ const struct clk_ops *ops,
+ struct clk_hw *hw)
+{
+ struct clk_init_data init = { };
+ char clk_name[32];
+
+ snprintf(clk_name, sizeof(clk_name), "%s#%s", dev_name(dwmac->dev),
+ name_suffix);
+
+ init.name = clk_name;
+ init.ops = ops;
+ init.flags = CLK_SET_RATE_PARENT;
+ init.parent_data = parents;
+ init.num_parents = num_parents;
+
+ hw->init = &init;
+
+ return devm_clk_register(dwmac->dev, hw);
+}
+
+static int meson8b_init_rgmii_tx_clk(struct meson8b_dwmac *dwmac)
+{
+ struct clk *clk;
+ struct device *dev = dwmac->dev;
+ static const struct clk_parent_data mux_parents[] = {
+ { .fw_name = "clkin0", },
+ { .index = -1, },
+ };
+ static const struct clk_div_table div_table[] = {
+ { .div = 2, .val = 2, },
+ { .div = 3, .val = 3, },
+ { .div = 4, .val = 4, },
+ { .div = 5, .val = 5, },
+ { .div = 6, .val = 6, },
+ { .div = 7, .val = 7, },
+ { /* end of array */ }
+ };
+ struct meson8b_dwmac_clk_configs *clk_configs;
+ struct clk_parent_data parent_data = { };
+
+ clk_configs = devm_kzalloc(dev, sizeof(*clk_configs), GFP_KERNEL);
+ if (!clk_configs)
+ return -ENOMEM;
+
+ clk_configs->m250_mux.reg = dwmac->regs + PRG_ETH0;
+ clk_configs->m250_mux.shift = __ffs(PRG_ETH0_CLK_M250_SEL_MASK);
+ clk_configs->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK >>
+ clk_configs->m250_mux.shift;
+ clk = meson8b_dwmac_register_clk(dwmac, "m250_sel", mux_parents,
+ ARRAY_SIZE(mux_parents), &clk_mux_ops,
+ &clk_configs->m250_mux.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ parent_data.hw = &clk_configs->m250_mux.hw;
+ clk_configs->m250_div.reg = dwmac->regs + PRG_ETH0;
+ clk_configs->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT;
+ clk_configs->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH;
+ clk_configs->m250_div.table = div_table;
+ clk_configs->m250_div.flags = CLK_DIVIDER_ALLOW_ZERO |
+ CLK_DIVIDER_ROUND_CLOSEST;
+ clk = meson8b_dwmac_register_clk(dwmac, "m250_div", &parent_data, 1,
+ &clk_divider_ops,
+ &clk_configs->m250_div.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ parent_data.hw = &clk_configs->m250_div.hw;
+ clk_configs->fixed_div2.mult = 1;
+ clk_configs->fixed_div2.div = 2;
+ clk = meson8b_dwmac_register_clk(dwmac, "fixed_div2", &parent_data, 1,
+ &clk_fixed_factor_ops,
+ &clk_configs->fixed_div2.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ parent_data.hw = &clk_configs->fixed_div2.hw;
+ clk_configs->rgmii_tx_en.reg = dwmac->regs + PRG_ETH0;
+ clk_configs->rgmii_tx_en.bit_idx = PRG_ETH0_RGMII_TX_CLK_EN;
+ clk = meson8b_dwmac_register_clk(dwmac, "rgmii_tx_en", &parent_data, 1,
+ &clk_gate_ops,
+ &clk_configs->rgmii_tx_en.hw);
+ if (WARN_ON(IS_ERR(clk)))
+ return PTR_ERR(clk);
+
+ dwmac->rgmii_tx_clk = clk;
+
+ return 0;
+}
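+
+/* A sketch of the clock chain registered above (the names are the
+ * "#suffix" ones generated by meson8b_dwmac_register_clk()):
+ *
+ *   clkin0 -> m250_sel (mux, PRG_ETH0 bit 4)
+ *          -> m250_div (divider /2../7, PRG_ETH0 bits [9:7])
+ *          -> fixed_div2 (fixed /2)
+ *          -> rgmii_tx_en (gate, PRG_ETH0 bit 10) = rgmii_tx_clk
+ *
+ * meson8b_init_prg_eth() later requests 125MHz on the gate, and
+ * CLK_SET_RATE_PARENT propagates the rate request up this chain.
+ */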
+
+static int meson8b_set_phy_mode(struct meson8b_dwmac *dwmac)
+{
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_RGMII_MODE,
+ PRG_ETH0_RGMII_MODE);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_RGMII_MODE, 0);
+ break;
+ default:
+ dev_err(dwmac->dev, "fail to set phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int meson_axg_set_phy_mode(struct meson8b_dwmac *dwmac)
+{
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ /* enable RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_EXT_PHY_MODE_MASK,
+ PRG_ETH0_EXT_RGMII_MODE);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ /* disable RGMII mode -> enables RMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_EXT_PHY_MODE_MASK,
+ PRG_ETH0_EXT_RMII_MODE);
+ break;
+ default:
+ dev_err(dwmac->dev, "fail to set phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac,
+ struct clk *clk)
+{
+ int ret;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ return devm_add_action_or_reset(dwmac->dev,
+ (void(*)(void *))clk_disable_unprepare,
+ clk);
+}
+
+static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac)
+{
+ u32 tx_dly_config, rx_adj_config, cfg_rxclk_dly, delay_config;
+ int ret;
+
+ rx_adj_config = 0;
+ cfg_rxclk_dly = 0;
+ tx_dly_config = FIELD_PREP(PRG_ETH0_TXDLY_MASK,
+ dwmac->tx_delay_ns >> 1);
+
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay)
+ cfg_rxclk_dly = FIELD_PREP(PRG_ETH1_CFG_RXCLK_DLY,
+ dwmac->rx_delay_ps / 200);
+ else if (dwmac->rx_delay_ps == 2000)
+ rx_adj_config = PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP;
+
+ switch (dwmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ delay_config = tx_dly_config | rx_adj_config;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ delay_config = tx_dly_config;
+ cfg_rxclk_dly = 0;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ delay_config = rx_adj_config;
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RMII:
+ delay_config = 0;
+ cfg_rxclk_dly = 0;
+ break;
+ default:
+ dev_err(dwmac->dev, "unsupported phy-mode %s\n",
+ phy_modes(dwmac->phy_mode));
+ return -EINVAL;
+ }
+
+ if (delay_config & PRG_ETH0_ADJ_ENABLE) {
+ if (!dwmac->timing_adj_clk) {
+ dev_err(dwmac->dev,
+ "The timing-adjustment clock is mandatory for the RX delay re-timing\n");
+ return -EINVAL;
+ }
+
+ /* The timing adjustment logic is driven by a separate clock */
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->timing_adj_clk);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "Failed to enable the timing-adjustment clock\n");
+ return ret;
+ }
+ }
+
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK |
+ PRG_ETH0_ADJ_ENABLE | PRG_ETH0_ADJ_SETUP |
+ PRG_ETH0_ADJ_DELAY | PRG_ETH0_ADJ_SKEW,
+ delay_config);
+
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH1, PRG_ETH1_CFG_RXCLK_DLY,
+ cfg_rxclk_dly);
+
+ return 0;
+}
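+
+/* To illustrate the TX delay encoding above with the driver's own 2ns
+ * fallback: tx_delay_ns >> 1 = 1, so tx_dly_config =
+ * FIELD_PREP(PRG_ETH0_TXDLY_MASK, 1) = 1 << 5, i.e. one quarter of the
+ * 8ns RGMII TX clock cycle (2ns), matching the table at the
+ * PRG_ETH0_TXDLY_MASK definition.
+ */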
+
+static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
+{
+ int ret;
+
+ if (phy_interface_mode_is_rgmii(dwmac->phy_mode)) {
+ /* only relevant for RMII mode -> disable in RGMII mode */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_INVERTED_RMII_CLK, 0);
+
+ /* Configure the 125MHz RGMII TX clock, the IP block changes
+ * the output automatically (= without us having to configure
+ * a register) based on the line-speed (125MHz for Gbit speeds,
+ * 25MHz for 100Mbit/s and 2.5MHz for 10Mbit/s).
+ */
+ ret = clk_set_rate(dwmac->rgmii_tx_clk, 125 * 1000 * 1000);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "failed to set RGMII TX clock\n");
+ return ret;
+ }
+
+ ret = meson8b_devm_clk_prepare_enable(dwmac,
+ dwmac->rgmii_tx_clk);
+ if (ret) {
+ dev_err(dwmac->dev,
+ "failed to enable the RGMII TX clock\n");
+ return ret;
+ }
+ } else {
+		/* invert internal clk_rmii_i to generate the 25/2.5MHz tx_rx_clk */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
+ PRG_ETH0_INVERTED_RMII_CLK,
+ PRG_ETH0_INVERTED_RMII_CLK);
+ }
+
+ /* enable TX_CLK and PHY_REF_CLK generator */
+ meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK,
+ PRG_ETH0_TX_AND_PHY_REF_CLK);
+
+ return 0;
+}
+
+static int meson8b_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct meson8b_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->data = (const struct meson8b_dwmac_data *)
+ of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data) {
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ dwmac->regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(dwmac->regs)) {
+ ret = PTR_ERR(dwmac->regs);
+ goto err_remove_config_dt;
+ }
+
+ dwmac->dev = &pdev->dev;
+ ret = of_get_phy_mode(pdev->dev.of_node, &dwmac->phy_mode);
+ if (ret) {
+ dev_err(&pdev->dev, "missing phy-mode property\n");
+ goto err_remove_config_dt;
+ }
+
+ /* use 2ns as fallback since this value was previously hardcoded */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
+ &dwmac->tx_delay_ns))
+ dwmac->tx_delay_ns = 2;
+
+ /* RX delay defaults to 0ps since this is what many boards use */
+ if (of_property_read_u32(pdev->dev.of_node, "rx-internal-delay-ps",
+ &dwmac->rx_delay_ps)) {
+ if (!of_property_read_u32(pdev->dev.of_node,
+ "amlogic,rx-delay-ns",
+ &dwmac->rx_delay_ps))
+ /* convert ns to ps */
+ dwmac->rx_delay_ps *= 1000;
+ }
+
+ if (dwmac->data->has_prg_eth1_rgmii_rx_delay) {
+ if (dwmac->rx_delay_ps > 3000 || dwmac->rx_delay_ps % 200) {
+ dev_err(dwmac->dev,
+ "The RGMII RX delay range is 0..3000ps in 200ps steps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ } else {
+ if (dwmac->rx_delay_ps != 0 && dwmac->rx_delay_ps != 2000) {
+ dev_err(dwmac->dev,
+ "The only allowed RGMII RX delays values are: 0ps, 2000ps");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+ }
+
+ dwmac->timing_adj_clk = devm_clk_get_optional(dwmac->dev,
+ "timing-adjustment");
+ if (IS_ERR(dwmac->timing_adj_clk)) {
+ ret = PTR_ERR(dwmac->timing_adj_clk);
+ goto err_remove_config_dt;
+ }
+
+ ret = meson8b_init_rgmii_delays(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = meson8b_init_rgmii_tx_clk(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = dwmac->data->set_phy_mode(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = meson8b_init_prg_eth(dwmac);
+ if (ret)
+ goto err_remove_config_dt;
+
+ plat_dat->bsp_priv = dwmac;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ return 0;
+
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static const struct meson8b_dwmac_data meson8b_dwmac_data = {
+ .set_phy_mode = meson8b_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
+};
+
+static const struct meson8b_dwmac_data meson_axg_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = false,
+};
+
+static const struct meson8b_dwmac_data meson_g12a_dwmac_data = {
+ .set_phy_mode = meson_axg_set_phy_mode,
+ .has_prg_eth1_rgmii_rx_delay = true,
+};
+
+static const struct of_device_id meson8b_dwmac_match[] = {
+ {
+ .compatible = "amlogic,meson8b-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson8m2-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-gxbb-dwmac",
+ .data = &meson8b_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-axg-dwmac",
+ .data = &meson_axg_dwmac_data,
+ },
+ {
+ .compatible = "amlogic,meson-g12a-dwmac",
+ .data = &meson_g12a_dwmac_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, meson8b_dwmac_match);
+
+static struct platform_driver meson8b_dwmac_driver = {
+ .probe = meson8b_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "meson8b-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = meson8b_dwmac_match,
+ },
+};
+module_platform_driver(meson8b_dwmac_driver);
+
+MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>");
+MODULE_DESCRIPTION("Amlogic Meson8b, Meson8m2 and GXBB DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
new file mode 100644
index 000000000..62a69a91a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-oxnas.c
@@ -0,0 +1,245 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Oxford Semiconductor OXNAS DWMAC glue layer
+ *
+ * Copyright (C) 2016 Neil Armstrong <narmstrong@baylibre.com>
+ * Copyright (C) 2014 Daniel Golle <daniel@makrotopia.org>
+ * Copyright (C) 2013 Ma Haijun <mahaijuns@gmail.com>
+ * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+/* System Control regmap offsets */
+#define OXNAS_DWMAC_CTRL_REGOFFSET 0x78
+#define OXNAS_DWMAC_DELAY_REGOFFSET 0x100
+
+/* Control Register */
+#define DWMAC_CKEN_RX_IN 14
+#define DWMAC_CKEN_RXN_OUT 13
+#define DWMAC_CKEN_RX_OUT 12
+#define DWMAC_CKEN_TX_IN 10
+#define DWMAC_CKEN_TXN_OUT 9
+#define DWMAC_CKEN_TX_OUT 8
+#define DWMAC_RX_SOURCE 7
+#define DWMAC_TX_SOURCE 6
+#define DWMAC_LOW_TX_SOURCE 4
+#define DWMAC_AUTO_TX_SOURCE 3
+#define DWMAC_RGMII 2
+#define DWMAC_SIMPLE_MUX 1
+#define DWMAC_CKEN_GTX 0
+
+/* Delay register */
+#define DWMAC_TX_VARDELAY_SHIFT 0
+#define DWMAC_TXN_VARDELAY_SHIFT 8
+#define DWMAC_RX_VARDELAY_SHIFT 16
+#define DWMAC_RXN_VARDELAY_SHIFT 24
+#define DWMAC_TX_VARDELAY(d) ((d) << DWMAC_TX_VARDELAY_SHIFT)
+#define DWMAC_TXN_VARDELAY(d) ((d) << DWMAC_TXN_VARDELAY_SHIFT)
+#define DWMAC_RX_VARDELAY(d) ((d) << DWMAC_RX_VARDELAY_SHIFT)
+#define DWMAC_RXN_VARDELAY(d) ((d) << DWMAC_RXN_VARDELAY_SHIFT)
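+
+/* For reference, the values programmed by oxnas_dwmac_setup_ox820()
+ * below expand (pure macro arithmetic) to:
+ *   DWMAC_TX_VARDELAY(4)  = 0x00000004
+ *   DWMAC_TXN_VARDELAY(2) = 0x00000200
+ *   DWMAC_RX_VARDELAY(10) = 0x000a0000
+ *   DWMAC_RXN_VARDELAY(8) = 0x08000000
+ * i.e. 0x080a0204 written to OXNAS_DWMAC_DELAY_REGOFFSET.
+ */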
+
+struct oxnas_dwmac;
+
+struct oxnas_dwmac_data {
+ int (*setup)(struct oxnas_dwmac *dwmac);
+};
+
+struct oxnas_dwmac {
+ struct device *dev;
+ struct clk *clk;
+ struct regmap *regmap;
+ const struct oxnas_dwmac_data *data;
+};
+
+static int oxnas_dwmac_setup_ox810se(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+		/* Use simple mux for 25/125 MHz clock switching */
+ BIT(DWMAC_SIMPLE_MUX);
+
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_setup_ox820(struct oxnas_dwmac *dwmac)
+{
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, &value);
+ if (ret < 0)
+ return ret;
+
+ /* Enable GMII_GTXCLK to follow GMII_REFCLK, required for gigabit PHY */
+ value |= BIT(DWMAC_CKEN_GTX) |
+		/* Use simple mux for 25/125 MHz clock switching */
+ BIT(DWMAC_SIMPLE_MUX) |
+ /* set auto switch tx clock source */
+ BIT(DWMAC_AUTO_TX_SOURCE) |
+ /* enable tx & rx vardelay */
+ BIT(DWMAC_CKEN_TX_OUT) |
+ BIT(DWMAC_CKEN_TXN_OUT) |
+ BIT(DWMAC_CKEN_TX_IN) |
+ BIT(DWMAC_CKEN_RX_OUT) |
+ BIT(DWMAC_CKEN_RXN_OUT) |
+ BIT(DWMAC_CKEN_RX_IN);
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_CTRL_REGOFFSET, value);
+
+ /* set tx & rx vardelay */
+ value = DWMAC_TX_VARDELAY(4) |
+ DWMAC_TXN_VARDELAY(2) |
+ DWMAC_RX_VARDELAY(10) |
+ DWMAC_RXN_VARDELAY(8);
+ regmap_write(dwmac->regmap, OXNAS_DWMAC_DELAY_REGOFFSET, value);
+
+ return 0;
+}
+
+static int oxnas_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+ int ret;
+
+ /* Reset HW here before changing the glue configuration */
+ ret = device_reset(dwmac->dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ return ret;
+
+ ret = dwmac->data->setup(dwmac);
+ if (ret)
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static void oxnas_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct oxnas_dwmac *dwmac = priv;
+
+ clk_disable_unprepare(dwmac->clk);
+}
+
+static int oxnas_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct oxnas_dwmac *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->data = (const struct oxnas_dwmac_data *)of_device_get_match_data(&pdev->dev);
+ if (!dwmac->data) {
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->dev = &pdev->dev;
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->init = oxnas_dwmac_init;
+ plat_dat->exit = oxnas_dwmac_exit;
+
+ dwmac->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "oxsemi,sys-ctrl");
+ if (IS_ERR(dwmac->regmap)) {
+ dev_err(&pdev->dev, "failed to have sysctrl regmap\n");
+ ret = PTR_ERR(dwmac->regmap);
+ goto err_remove_config_dt;
+ }
+
+ dwmac->clk = devm_clk_get(&pdev->dev, "gmac");
+ if (IS_ERR(dwmac->clk)) {
+ ret = PTR_ERR(dwmac->clk);
+ goto err_remove_config_dt;
+ }
+
+ ret = oxnas_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_dwmac_exit;
+
+ return 0;
+
+err_dwmac_exit:
+ oxnas_dwmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static const struct oxnas_dwmac_data ox810se_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox810se,
+};
+
+static const struct oxnas_dwmac_data ox820_dwmac_data = {
+ .setup = oxnas_dwmac_setup_ox820,
+};
+
+static const struct of_device_id oxnas_dwmac_match[] = {
+ {
+ .compatible = "oxsemi,ox810se-dwmac",
+ .data = &ox810se_dwmac_data,
+ },
+ {
+ .compatible = "oxsemi,ox820-dwmac",
+ .data = &ox820_dwmac_data,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, oxnas_dwmac_match);
+
+static struct platform_driver oxnas_dwmac_driver = {
+ .probe = oxnas_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "oxnas-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = oxnas_dwmac_match,
+ },
+};
+module_platform_driver(oxnas_dwmac_driver);
+
+MODULE_AUTHOR("Neil Armstrong <narmstrong@baylibre.com>");
+MODULE_DESCRIPTION("Oxford Semiconductor OXNAS DWMAC glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
new file mode 100644
index 000000000..732774645
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -0,0 +1,615 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2018-19, Linaro Limited
+
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#define RGMII_IO_MACRO_CONFIG 0x0
+#define SDCC_HC_REG_DLL_CONFIG 0x4
+#define SDCC_HC_REG_DDR_CONFIG 0xC
+#define SDCC_HC_REG_DLL_CONFIG2 0x10
+#define SDC4_STATUS 0x14
+#define SDCC_USR_CTL 0x18
+#define RGMII_IO_MACRO_CONFIG2 0x1C
+#define RGMII_IO_MACRO_DEBUG1 0x20
+#define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28
+
+/* RGMII_IO_MACRO_CONFIG fields */
+#define RGMII_CONFIG_FUNC_CLK_EN BIT(30)
+#define RGMII_CONFIG_POS_NEG_DATA_SEL BIT(23)
+#define RGMII_CONFIG_GPIO_CFG_RX_INT GENMASK(21, 20)
+#define RGMII_CONFIG_GPIO_CFG_TX_INT GENMASK(19, 17)
+#define RGMII_CONFIG_MAX_SPD_PRG_9 GENMASK(16, 8)
+#define RGMII_CONFIG_MAX_SPD_PRG_2 GENMASK(7, 6)
+#define RGMII_CONFIG_INTF_SEL GENMASK(5, 4)
+#define RGMII_CONFIG_BYPASS_TX_ID_EN BIT(3)
+#define RGMII_CONFIG_LOOPBACK_EN BIT(2)
+#define RGMII_CONFIG_PROG_SWAP BIT(1)
+#define RGMII_CONFIG_DDR_MODE BIT(0)
+
+/* SDCC_HC_REG_DLL_CONFIG fields */
+#define SDCC_DLL_CONFIG_DLL_RST BIT(30)
+#define SDCC_DLL_CONFIG_PDN BIT(29)
+#define SDCC_DLL_CONFIG_MCLK_FREQ GENMASK(26, 24)
+#define SDCC_DLL_CONFIG_CDR_SELEXT GENMASK(23, 20)
+#define SDCC_DLL_CONFIG_CDR_EXT_EN BIT(19)
+#define SDCC_DLL_CONFIG_CK_OUT_EN BIT(18)
+#define SDCC_DLL_CONFIG_CDR_EN BIT(17)
+#define SDCC_DLL_CONFIG_DLL_EN BIT(16)
+#define SDCC_DLL_MCLK_GATING_EN BIT(5)
+#define SDCC_DLL_CDR_FINE_PHASE GENMASK(3, 2)
+
+/* SDCC_HC_REG_DDR_CONFIG fields */
+#define SDCC_DDR_CONFIG_PRG_DLY_EN BIT(31)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY GENMASK(26, 21)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE GENMASK(29, 27)
+#define SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN BIT(30)
+#define SDCC_DDR_CONFIG_PRG_RCLK_DLY GENMASK(8, 0)
+
+/* SDCC_HC_REG_DLL_CONFIG2 fields */
+#define SDCC_DLL_CONFIG2_DLL_CLOCK_DIS BIT(21)
+#define SDCC_DLL_CONFIG2_MCLK_FREQ_CALC GENMASK(17, 10)
+#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL GENMASK(3, 2)
+#define SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW BIT(1)
+#define SDCC_DLL_CONFIG2_DDR_CAL_EN BIT(0)
+
+/* SDC4_STATUS bits */
+#define SDC4_STATUS_DLL_LOCK BIT(7)
+
+/* RGMII_IO_MACRO_CONFIG2 fields */
+#define RGMII_CONFIG2_RSVD_CONFIG15 GENMASK(31, 17)
+#define RGMII_CONFIG2_RGMII_CLK_SEL_CFG BIT(16)
+#define RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN BIT(13)
+#define RGMII_CONFIG2_CLK_DIVIDE_SEL BIT(12)
+#define RGMII_CONFIG2_RX_PROG_SWAP BIT(7)
+#define RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL BIT(6)
+#define RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN BIT(5)
+
+struct ethqos_emac_por {
+ unsigned int offset;
+ unsigned int value;
+};
+
+struct ethqos_emac_driver_data {
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+	bool rgmii_config_loopback_en;
+};
+
+struct qcom_ethqos {
+ struct platform_device *pdev;
+ void __iomem *rgmii_base;
+
+ unsigned int rgmii_clk_rate;
+ struct clk *rgmii_clk;
+ unsigned int speed;
+
+ const struct ethqos_emac_por *por;
+ unsigned int num_por;
+	bool rgmii_config_loopback_en;
+};
+
+static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset)
+{
+ return readl(ethqos->rgmii_base + offset);
+}
+
+static void rgmii_writel(struct qcom_ethqos *ethqos,
+ int value, unsigned int offset)
+{
+ writel(value, ethqos->rgmii_base + offset);
+}
+
+static void rgmii_updatel(struct qcom_ethqos *ethqos,
+ int mask, int val, unsigned int offset)
+{
+ unsigned int temp;
+
+ temp = rgmii_readl(ethqos, offset);
+ temp = (temp & ~(mask)) | val;
+ rgmii_writel(ethqos, temp, offset);
+}
+
+static void rgmii_dump(void *priv)
+{
+ struct qcom_ethqos *ethqos = priv;
+
+ dev_dbg(&ethqos->pdev->dev, "Rgmii register dump\n");
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DDR_CONFIG: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DDR_CONFIG));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_HC_REG_DLL_CONFIG2: %x\n",
+ rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG2));
+ dev_dbg(&ethqos->pdev->dev, "SDC4_STATUS: %x\n",
+ rgmii_readl(ethqos, SDC4_STATUS));
+ dev_dbg(&ethqos->pdev->dev, "SDCC_USR_CTL: %x\n",
+ rgmii_readl(ethqos, SDCC_USR_CTL));
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_CONFIG2: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_CONFIG2));
+ dev_dbg(&ethqos->pdev->dev, "RGMII_IO_MACRO_DEBUG1: %x\n",
+ rgmii_readl(ethqos, RGMII_IO_MACRO_DEBUG1));
+ dev_dbg(&ethqos->pdev->dev, "EMAC_SYSTEM_LOW_POWER_DEBUG: %x\n",
+ rgmii_readl(ethqos, EMAC_SYSTEM_LOW_POWER_DEBUG));
+}
+
+/* Clock rates */
+#define RGMII_1000_NOM_CLK_FREQ (250 * 1000 * 1000UL)
+#define RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ (50 * 1000 * 1000UL)
+#define RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ (5 * 1000 * 1000UL)
+
+static void
+ethqos_update_rgmii_clk(struct qcom_ethqos *ethqos, unsigned int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ ethqos->rgmii_clk_rate = RGMII_1000_NOM_CLK_FREQ;
+ break;
+
+ case SPEED_100:
+ ethqos->rgmii_clk_rate = RGMII_ID_MODE_100_LOW_SVS_CLK_FREQ;
+ break;
+
+ case SPEED_10:
+ ethqos->rgmii_clk_rate = RGMII_ID_MODE_10_LOW_SVS_CLK_FREQ;
+ break;
+ }
+
+ clk_set_rate(ethqos->rgmii_clk, ethqos->rgmii_clk_rate);
+}
+
+static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos)
+{
+ rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN,
+ RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG);
+}
+
+static const struct ethqos_emac_por emac_v2_3_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x00C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_driver_data emac_v2_3_0_data = {
+ .por = emac_v2_3_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_3_0_por),
+	.rgmii_config_loopback_en = true,
+};
+
+static const struct ethqos_emac_por emac_v2_1_0_por[] = {
+ { .offset = RGMII_IO_MACRO_CONFIG, .value = 0x40C01343 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG, .value = 0x2004642C },
+ { .offset = SDCC_HC_REG_DDR_CONFIG, .value = 0x00000000 },
+ { .offset = SDCC_HC_REG_DLL_CONFIG2, .value = 0x00200000 },
+ { .offset = SDCC_USR_CTL, .value = 0x00010800 },
+ { .offset = RGMII_IO_MACRO_CONFIG2, .value = 0x00002060 },
+};
+
+static const struct ethqos_emac_driver_data emac_v2_1_0_data = {
+ .por = emac_v2_1_0_por,
+ .num_por = ARRAY_SIZE(emac_v2_1_0_por),
+	.rgmii_config_loopback_en = false,
+};
+
+static int ethqos_dll_configure(struct qcom_ethqos *ethqos)
+{
+ unsigned int val;
+ int retry = 1000;
+
+ /* Set CDR_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EN,
+ SDCC_DLL_CONFIG_CDR_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set CDR_EXT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CDR_EXT_EN,
+ SDCC_DLL_CONFIG_CDR_EXT_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set DLL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ rgmii_updatel(ethqos, SDCC_DLL_MCLK_GATING_EN,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CDR_FINE_PHASE,
+ 0, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Wait for CK_OUT_EN clear */
+ do {
+ val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ val &= SDCC_DLL_CONFIG_CK_OUT_EN;
+ if (!val)
+ break;
+ mdelay(1);
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev, "Clear CK_OUT_EN timedout\n");
+
+ /* Set CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_DLL_CONFIG_CK_OUT_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Wait for CK_OUT_EN set */
+ retry = 1000;
+ do {
+ val = rgmii_readl(ethqos, SDCC_HC_REG_DLL_CONFIG);
+ val &= SDCC_DLL_CONFIG_CK_OUT_EN;
+ if (val)
+ break;
+ mdelay(1);
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev, "Set CK_OUT_EN timedout\n");
+
+ /* Set DDR_CAL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_CAL_EN,
+ SDCC_DLL_CONFIG2_DDR_CAL_EN, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DLL_CLOCK_DIS,
+ 0, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_MCLK_FREQ_CALC,
+ 0x1A << 10, SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SEL,
+ BIT(2), SDCC_HC_REG_DLL_CONFIG2);
+
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_DLL_CONFIG2_DDR_TRAFFIC_INIT_SW,
+ SDCC_HC_REG_DLL_CONFIG2);
+
+ return 0;
+}
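+
+/* The CK_OUT_EN busy-wait loops above could also be expressed with the
+ * generic iopoll helpers; a minimal sketch for the "wait for clear" case,
+ * assuming the same 1ms poll interval and 1s timeout (error handling
+ * elided):
+ *
+ *	#include <linux/iopoll.h>
+ *
+ *	unsigned int val;
+ *	int err = readl_poll_timeout(ethqos->rgmii_base +
+ *				     SDCC_HC_REG_DLL_CONFIG, val,
+ *				     !(val & SDCC_DLL_CONFIG_CK_OUT_EN),
+ *				     1000, 1000 * 1000);
+ */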
+
+static int ethqos_rgmii_macro_init(struct qcom_ethqos *ethqos)
+{
+ /* Disable loopback mode */
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_TO_RX_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
+
+ /* Select RGMII, write 0 to interface select */
+ rgmii_updatel(ethqos, RGMII_CONFIG_INTF_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+
+ switch (ethqos->speed) {
+ case SPEED_1000:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_CONFIG_POS_NEG_DATA_SEL,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ RGMII_CONFIG_PROG_SWAP, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_CONFIG2_RX_PROG_SWAP,
+ RGMII_IO_MACRO_CONFIG2);
+
+ /* Set PRG_RCLK_DLY to 57 for 1.8 ns delay */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_RCLK_DLY,
+ 57, SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_PRG_DLY_EN,
+ SDCC_DDR_CONFIG_PRG_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+		if (ethqos->rgmii_config_loopback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+ break;
+
+ case SPEED_100:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_2,
+ BIT(6), RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ /* Write 0x5 to PRG_RCLK_DLY_CODE */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
+ (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+		if (ethqos->rgmii_config_loopback_en)
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ else
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ 0, RGMII_IO_MACRO_CONFIG);
+
+ break;
+
+ case SPEED_10:
+ rgmii_updatel(ethqos, RGMII_CONFIG_DDR_MODE,
+ RGMII_CONFIG_DDR_MODE, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_CONFIG_BYPASS_TX_ID_EN,
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_POS_NEG_DATA_SEL,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_DATA_DIVIDE_CLK_SEL,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_TX_CLK_PHASE_SHIFT_EN,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG_MAX_SPD_PRG_9,
+ BIT(12) | GENMASK(9, 8),
+ RGMII_IO_MACRO_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RSVD_CONFIG15,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ rgmii_updatel(ethqos, RGMII_CONFIG2_RX_PROG_SWAP,
+ 0, RGMII_IO_MACRO_CONFIG2);
+ /* Write 0x5 to PRG_RCLK_DLY_CODE */
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_CODE,
+ (BIT(29) | BIT(27)), SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_DDR_CONFIG_EXT_PRG_RCLK_DLY_EN,
+ SDCC_HC_REG_DDR_CONFIG);
+ rgmii_updatel(ethqos, RGMII_CONFIG_LOOPBACK_EN,
+ RGMII_CONFIG_LOOPBACK_EN, RGMII_IO_MACRO_CONFIG);
+ break;
+ default:
+ dev_err(&ethqos->pdev->dev,
+ "Invalid speed %d\n", ethqos->speed);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ethqos_configure(struct qcom_ethqos *ethqos)
+{
+	unsigned int dll_lock;
+ unsigned int i, retry = 1000;
+
+ /* Reset to POR values and enable clk */
+ for (i = 0; i < ethqos->num_por; i++)
+ rgmii_writel(ethqos, ethqos->por[i].value,
+ ethqos->por[i].offset);
+ ethqos_set_func_clk_en(ethqos);
+
+ /* Initialize the DLL first */
+
+ /* Set DLL_RST */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST,
+ SDCC_DLL_CONFIG_DLL_RST, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set PDN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN,
+ SDCC_DLL_CONFIG_PDN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear DLL_RST */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_RST, 0,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ /* Clear PDN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_PDN, 0,
+ SDCC_HC_REG_DLL_CONFIG);
+
+ if (ethqos->speed != SPEED_100 && ethqos->speed != SPEED_10) {
+ /* Set DLL_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_DLL_EN,
+ SDCC_DLL_CONFIG_DLL_EN, SDCC_HC_REG_DLL_CONFIG);
+
+ /* Set CK_OUT_EN */
+ rgmii_updatel(ethqos, SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_DLL_CONFIG_CK_OUT_EN,
+ SDCC_HC_REG_DLL_CONFIG);
+
+		/* Set USR_CTL bit 26 using a 3-bit wide mask */
+ rgmii_updatel(ethqos, GENMASK(26, 24), BIT(26), SDCC_USR_CTL);
+
+ /* wait for DLL LOCK */
+ do {
+ mdelay(1);
+ dll_lock = rgmii_readl(ethqos, SDC4_STATUS);
+ if (dll_lock & SDC4_STATUS_DLL_LOCK)
+ break;
+ retry--;
+ } while (retry > 0);
+ if (!retry)
+ dev_err(&ethqos->pdev->dev,
+ "Timeout while waiting for DLL lock\n");
+ }
+
+ if (ethqos->speed == SPEED_1000)
+ ethqos_dll_configure(ethqos);
+
+ ethqos_rgmii_macro_init(ethqos);
+
+ return 0;
+}
+
+static void ethqos_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct qcom_ethqos *ethqos = priv;
+
+ ethqos->speed = speed;
+ ethqos_update_rgmii_clk(ethqos, speed);
+ ethqos_configure(ethqos);
+}
+
+static int ethqos_clks_config(void *priv, bool enabled)
+{
+ struct qcom_ethqos *ethqos = priv;
+ int ret = 0;
+
+ if (enabled) {
+ ret = clk_prepare_enable(ethqos->rgmii_clk);
+ if (ret) {
+ dev_err(&ethqos->pdev->dev, "rgmii_clk enable failed\n");
+ return ret;
+ }
+
+		/* Enable the functional clock to prevent the DMA reset from
+		 * timing out due to a missing PHY clock after the hardware
+		 * block has been power cycled. The actual configuration will
+		 * be adjusted once ethqos_fix_mac_speed() is invoked.
+		 */
+ ethqos_set_func_clk_en(ethqos);
+ } else {
+ clk_disable_unprepare(ethqos->rgmii_clk);
+ }
+
+ return ret;
+}
+
+static int qcom_ethqos_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct ethqos_emac_driver_data *data;
+ struct qcom_ethqos *ethqos;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat)) {
+ dev_err(&pdev->dev, "dt configuration failed\n");
+ return PTR_ERR(plat_dat);
+ }
+
+ plat_dat->clks_config = ethqos_clks_config;
+
+ ethqos = devm_kzalloc(&pdev->dev, sizeof(*ethqos), GFP_KERNEL);
+ if (!ethqos) {
+ ret = -ENOMEM;
+ goto err_mem;
+ }
+
+ ethqos->pdev = pdev;
+ ethqos->rgmii_base = devm_platform_ioremap_resource_byname(pdev, "rgmii");
+ if (IS_ERR(ethqos->rgmii_base)) {
+ ret = PTR_ERR(ethqos->rgmii_base);
+ goto err_mem;
+ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ ethqos->por = data->por;
+ ethqos->num_por = data->num_por;
+	ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en;
+
+ ethqos->rgmii_clk = devm_clk_get(&pdev->dev, "rgmii");
+ if (IS_ERR(ethqos->rgmii_clk)) {
+ ret = PTR_ERR(ethqos->rgmii_clk);
+ goto err_mem;
+ }
+
+ ret = ethqos_clks_config(ethqos, true);
+ if (ret)
+ goto err_mem;
+
+ ethqos->speed = SPEED_1000;
+ ethqos_update_rgmii_clk(ethqos, SPEED_1000);
+ ethqos_set_func_clk_en(ethqos);
+
+ plat_dat->bsp_priv = ethqos;
+ plat_dat->fix_mac_speed = ethqos_fix_mac_speed;
+ plat_dat->dump_debug_regs = rgmii_dump;
+ plat_dat->has_gmac4 = 1;
+ plat_dat->pmt = 1;
+ plat_dat->tso_en = of_property_read_bool(np, "snps,tso");
+ if (of_device_is_compatible(np, "qcom,qcs404-ethqos"))
+ plat_dat->rx_clk_runs_in_lpi = 1;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_clk;
+
+ return ret;
+
+err_clk:
+ ethqos_clks_config(ethqos, false);
+
+err_mem:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int qcom_ethqos_remove(struct platform_device *pdev)
+{
+ struct qcom_ethqos *ethqos;
+ int ret;
+
+ ethqos = get_stmmac_bsp_priv(&pdev->dev);
+ if (!ethqos)
+ return -ENODEV;
+
+ ret = stmmac_pltfr_remove(pdev);
+ ethqos_clks_config(ethqos, false);
+
+ return ret;
+}
+
+static const struct of_device_id qcom_ethqos_match[] = {
+ { .compatible = "qcom,qcs404-ethqos", .data = &emac_v2_3_0_data},
+ { .compatible = "qcom,sm8150-ethqos", .data = &emac_v2_1_0_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qcom_ethqos_match);
+
+static struct platform_driver qcom_ethqos_driver = {
+ .probe = qcom_ethqos_probe,
+ .remove = qcom_ethqos_remove,
+ .driver = {
+ .name = "qcom-ethqos",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = of_match_ptr(qcom_ethqos_match),
+ },
+};
+module_platform_driver(qcom_ethqos_driver);
+
+MODULE_DESCRIPTION("Qualcomm ETHQOS driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
new file mode 100644
index 000000000..cf682a9e3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -0,0 +1,1938 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/**
+ * DOC: dwmac-rk.c - Rockchip RK3288 DWMAC specific glue layer
+ *
+ * Copyright (C) 2014 Chen-Zhi (Roger Chen)
+ *
+ * Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>
+ */
+
+#include <linux/stmmac.h>
+#include <linux/bitops.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/of_net.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/pm_runtime.h>
+
+#include "stmmac_platform.h"
+
+struct rk_priv_data;
+struct rk_gmac_ops {
+ void (*set_to_rgmii)(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay);
+ void (*set_to_rmii)(struct rk_priv_data *bsp_priv);
+ void (*set_rgmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ void (*set_rmii_speed)(struct rk_priv_data *bsp_priv, int speed);
+ void (*set_clock_selection)(struct rk_priv_data *bsp_priv, bool input,
+ bool enable);
+ void (*integrated_phy_powerup)(struct rk_priv_data *bsp_priv);
+ bool regs_valid;
+ u32 regs[];
+};
+
+static const char * const rk_clocks[] = {
+ "aclk_mac", "pclk_mac", "mac_clk_tx", "clk_mac_speed",
+};
+
+static const char * const rk_rmii_clocks[] = {
+ "mac_clk_rx", "clk_mac_ref", "clk_mac_refout",
+};
+
+enum rk_clocks_index {
+ RK_ACLK_MAC = 0,
+ RK_PCLK_MAC,
+ RK_MAC_CLK_TX,
+ RK_CLK_MAC_SPEED,
+ RK_MAC_CLK_RX,
+ RK_CLK_MAC_REF,
+ RK_CLK_MAC_REFOUT,
+};
+
+struct rk_priv_data {
+ struct platform_device *pdev;
+ phy_interface_t phy_iface;
+ int id;
+ struct regulator *regulator;
+ bool suspended;
+ const struct rk_gmac_ops *ops;
+
+ bool clk_enabled;
+ bool clock_input;
+ bool integrated_phy;
+
+ struct clk_bulk_data *clks;
+ int num_clks;
+ struct clk *clk_mac;
+ struct clk *clk_phy;
+
+ struct reset_control *phy_reset;
+
+ int tx_delay;
+ int rx_delay;
+
+ struct regmap *grf;
+ struct regmap *php_grf;
+};
+
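+/* The Rockchip GRF registers pair each control bit with a write-enable bit
+ * in the upper half-word: writing (val << shift) together with
+ * (mask << (shift + 16)) updates only the masked bits and leaves the rest
+ * of the register unchanged, so no read-modify-write is needed.
+ */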
+#define HIWORD_UPDATE(val, mask, shift) \
+ ((val) << (shift) | (mask) << ((shift) + 16))
+
+#define GRF_BIT(nr) (BIT(nr) | BIT(nr+16))
+#define GRF_CLR_BIT(nr) (BIT(nr+16))
+
+#define DELAY_ENABLE(soc, tx, rx) \
+ (((tx) ? soc##_GMAC_TXCLK_DLY_ENABLE : soc##_GMAC_TXCLK_DLY_DISABLE) | \
+ ((rx) ? soc##_GMAC_RXCLK_DLY_ENABLE : soc##_GMAC_RXCLK_DLY_DISABLE))
+
+#define PX30_GRF_GMAC_CON1 0x0904
+
+/* PX30_GRF_GMAC_CON1 */
+#define PX30_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+ GRF_BIT(6))
+#define PX30_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define PX30_GMAC_SPEED_100M GRF_BIT(2)
+
+static void px30_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void px30_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ struct device *dev = &bsp_priv->pdev->dev;
+ int ret;
+
+ if (!clk_mac_speed) {
+ dev_err(dev, "%s: Missing clk_mac_speed clock\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_10M);
+
+ ret = clk_set_rate(clk_mac_speed, 2500000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 2500000 failed: %d\n",
+ __func__, ret);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, PX30_GRF_GMAC_CON1,
+ PX30_GMAC_SPEED_100M);
+
+ ret = clk_set_rate(clk_mac_speed, 25000000);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate 25000000 failed: %d\n",
+ __func__, ret);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops px30_ops = {
+ .set_to_rmii = px30_set_to_rmii,
+ .set_rmii_speed = px30_set_rmii_speed,
+};
+
+#define RK3128_GRF_MAC_CON0 0x0168
+#define RK3128_GRF_MAC_CON1 0x016c
+
+/* RK3128_GRF_MAC_CON0 */
+#define RK3128_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3128_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define RK3128_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3128_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3128_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3128_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3128_GRF_MAC_CON1 */
+#define RK3128_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(6) | GRF_CLR_BIT(7) | GRF_CLR_BIT(8))
+#define RK3128_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | GRF_BIT(8))
+#define RK3128_GMAC_FLOW_CTRL GRF_BIT(9)
+#define RK3128_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define RK3128_GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define RK3128_GMAC_SPEED_100M GRF_BIT(10)
+#define RK3128_GMAC_RMII_CLK_25M GRF_BIT(11)
+#define RK3128_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define RK3128_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3128_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3128_GMAC_RMII_MODE GRF_BIT(14)
+#define RK3128_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+
+static void rk3128_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_PHY_INTF_SEL_RGMII |
+ RK3128_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON0,
+ DELAY_ENABLE(RK3128, tx_delay, rx_delay) |
+ RK3128_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3128_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3128_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_PHY_INTF_SEL_RMII | RK3128_GMAC_RMII_MODE);
+}
+
+static void rk3128_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3128_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_RMII_CLK_2_5M |
+ RK3128_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3128_GRF_MAC_CON1,
+ RK3128_GMAC_RMII_CLK_25M |
+ RK3128_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3128_ops = {
+ .set_to_rgmii = rk3128_set_to_rgmii,
+ .set_to_rmii = rk3128_set_to_rmii,
+ .set_rgmii_speed = rk3128_set_rgmii_speed,
+ .set_rmii_speed = rk3128_set_rmii_speed,
+};
+
+#define RK3228_GRF_MAC_CON0 0x0900
+#define RK3228_GRF_MAC_CON1 0x0904
+
+#define RK3228_GRF_CON_MUX 0x50
+
+/* RK3228_GRF_MAC_CON0 */
+#define RK3228_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3228_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3228_GRF_MAC_CON1 */
+#define RK3228_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3228_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3228_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3228_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3228_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RK3228_GMAC_SPEED_100M GRF_BIT(2)
+#define RK3228_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RK3228_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+#define RK3228_GMAC_CLK_125M (GRF_CLR_BIT(8) | GRF_CLR_BIT(9))
+#define RK3228_GMAC_CLK_25M (GRF_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_CLK_2_5M (GRF_CLR_BIT(8) | GRF_BIT(9))
+#define RK3228_GMAC_RMII_MODE GRF_BIT(10)
+#define RK3228_GMAC_RMII_MODE_CLR GRF_CLR_BIT(10)
+#define RK3228_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3228_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RK3228_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3228_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+
+/* RK3228_GRF_COM_MUX */
+#define RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY GRF_BIT(15)
+
+static void rk3228_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RGMII |
+ RK3228_GMAC_RMII_MODE_CLR |
+ DELAY_ENABLE(RK3228, tx_delay, rx_delay));
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON0,
+ RK3228_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3228_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3228_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_PHY_INTF_SEL_RMII |
+ RK3228_GMAC_RMII_MODE);
+
+ /* set MAC to RMII mode */
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1, GRF_BIT(11));
+}
+
+static void rk3228_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3228_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_2_5M |
+ RK3228_GMAC_SPEED_10M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3228_GRF_MAC_CON1,
+ RK3228_GMAC_RMII_CLK_25M |
+ RK3228_GMAC_SPEED_100M);
+ else
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+}
+
+static void rk3228_integrated_phy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK3228_GRF_CON_MUX,
+ RK3228_GRF_CON_MUX_GMAC_INTEGRATED_PHY);
+}
+
+static const struct rk_gmac_ops rk3228_ops = {
+ .set_to_rgmii = rk3228_set_to_rgmii,
+ .set_to_rmii = rk3228_set_to_rmii,
+ .set_rgmii_speed = rk3228_set_rgmii_speed,
+ .set_rmii_speed = rk3228_set_rmii_speed,
+ .integrated_phy_powerup = rk3228_integrated_phy_powerup,
+};
+
+#define RK3288_GRF_SOC_CON1 0x0248
+#define RK3288_GRF_SOC_CON3 0x0250
+
+/* RK3288_GRF_SOC_CON1 */
+#define RK3288_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(6) | GRF_CLR_BIT(7) | \
+ GRF_CLR_BIT(8))
+#define RK3288_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(6) | GRF_CLR_BIT(7) | \
+ GRF_BIT(8))
+#define RK3288_GMAC_FLOW_CTRL GRF_BIT(9)
+#define RK3288_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(9)
+#define RK3288_GMAC_SPEED_10M GRF_CLR_BIT(10)
+#define RK3288_GMAC_SPEED_100M GRF_BIT(10)
+#define RK3288_GMAC_RMII_CLK_25M GRF_BIT(11)
+#define RK3288_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(11)
+#define RK3288_GMAC_CLK_125M (GRF_CLR_BIT(12) | GRF_CLR_BIT(13))
+#define RK3288_GMAC_CLK_25M (GRF_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_CLK_2_5M (GRF_CLR_BIT(12) | GRF_BIT(13))
+#define RK3288_GMAC_RMII_MODE GRF_BIT(14)
+#define RK3288_GMAC_RMII_MODE_CLR GRF_CLR_BIT(14)
+
+/* RK3288_GRF_SOC_CON3 */
+#define RK3288_GMAC_TXCLK_DLY_ENABLE GRF_BIT(14)
+#define RK3288_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(14)
+#define RK3288_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3288_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3288_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3288_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3288_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_PHY_INTF_SEL_RGMII |
+ RK3288_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON3,
+ DELAY_ENABLE(RK3288, tx_delay, rx_delay) |
+ RK3288_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3288_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3288_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_PHY_INTF_SEL_RMII | RK3288_GMAC_RMII_MODE);
+}
+
+static void rk3288_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3288_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_RMII_CLK_2_5M |
+ RK3288_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3288_GRF_SOC_CON1,
+ RK3288_GMAC_RMII_CLK_25M |
+ RK3288_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3288_ops = {
+ .set_to_rgmii = rk3288_set_to_rgmii,
+ .set_to_rmii = rk3288_set_to_rmii,
+ .set_rgmii_speed = rk3288_set_rgmii_speed,
+ .set_rmii_speed = rk3288_set_rmii_speed,
+};
+
+#define RK3308_GRF_MAC_CON0 0x04a0
+
+/* RK3308_GRF_MAC_CON0 */
+#define RK3308_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(2) | GRF_CLR_BIT(3) | \
+ GRF_BIT(4))
+#define RK3308_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3308_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3308_GMAC_SPEED_10M GRF_CLR_BIT(0)
+#define RK3308_GMAC_SPEED_100M GRF_BIT(0)
+
+static void rk3308_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
+ RK3308_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rk3308_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
+ RK3308_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3308_GRF_MAC_CON0,
+ RK3308_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3308_ops = {
+ .set_to_rmii = rk3308_set_to_rmii,
+ .set_rmii_speed = rk3308_set_rmii_speed,
+};
+
+#define RK3328_GRF_MAC_CON0 0x0900
+#define RK3328_GRF_MAC_CON1 0x0904
+#define RK3328_GRF_MAC_CON2 0x0908
+#define RK3328_GRF_MACPHY_CON1 0xb04
+
+/* RK3328_GRF_MAC_CON0 */
+#define RK3328_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 7)
+#define RK3328_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+/* RK3328_GRF_MAC_CON1 */
+#define RK3328_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3328_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3328_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3328_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3328_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RK3328_GMAC_SPEED_100M GRF_BIT(2)
+#define RK3328_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RK3328_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+#define RK3328_GMAC_CLK_125M (GRF_CLR_BIT(11) | GRF_CLR_BIT(12))
+#define RK3328_GMAC_CLK_25M (GRF_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_CLK_2_5M (GRF_CLR_BIT(11) | GRF_BIT(12))
+#define RK3328_GMAC_RMII_MODE GRF_BIT(9)
+#define RK3328_GMAC_RMII_MODE_CLR GRF_CLR_BIT(9)
+#define RK3328_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3328_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RK3328_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3328_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+
+/* RK3328_GRF_MACPHY_CON1 */
+#define RK3328_MACPHY_RMII_MODE GRF_BIT(9)
+
+static void rk3328_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_PHY_INTF_SEL_RGMII |
+ RK3328_GMAC_RMII_MODE_CLR |
+ RK3328_GMAC_RXCLK_DLY_ENABLE |
+ RK3328_GMAC_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON0,
+ RK3328_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3328_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3328_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int reg;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
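+ /* The integrated PHY sits behind a separate GRF register
+ * (MAC_CON2), so select the register according to the PHY in use.
+ */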
+ reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
+ RK3328_GRF_MAC_CON1;
+
+ regmap_write(bsp_priv->grf, reg,
+ RK3328_GMAC_PHY_INTF_SEL_RMII |
+ RK3328_GMAC_RMII_MODE);
+}
+
+static void rk3328_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3328_GRF_MAC_CON1,
+ RK3328_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3328_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int reg;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ reg = bsp_priv->integrated_phy ? RK3328_GRF_MAC_CON2 :
+ RK3328_GRF_MAC_CON1;
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, reg,
+ RK3328_GMAC_RMII_CLK_2_5M |
+ RK3328_GMAC_SPEED_10M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, reg,
+ RK3328_GMAC_RMII_CLK_25M |
+ RK3328_GMAC_SPEED_100M);
+ else
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+}
+
+static void rk3328_integrated_phy_powerup(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK3328_GRF_MACPHY_CON1,
+ RK3328_MACPHY_RMII_MODE);
+}
+
+static const struct rk_gmac_ops rk3328_ops = {
+ .set_to_rgmii = rk3328_set_to_rgmii,
+ .set_to_rmii = rk3328_set_to_rmii,
+ .set_rgmii_speed = rk3328_set_rgmii_speed,
+ .set_rmii_speed = rk3328_set_rmii_speed,
+ .integrated_phy_powerup = rk3328_integrated_phy_powerup,
+};
+
+#define RK3366_GRF_SOC_CON6 0x0418
+#define RK3366_GRF_SOC_CON7 0x041c
+
+/* RK3366_GRF_SOC_CON6 */
+#define RK3366_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3366_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3366_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3366_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3366_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3366_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3366_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3366_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3366_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3366_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3366_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3366_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3366_GRF_SOC_CON7 */
+#define RK3366_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3366_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3366_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3366_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3366_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3366_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3366_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_PHY_INTF_SEL_RGMII |
+ RK3366_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON7,
+ DELAY_ENABLE(RK3366, tx_delay, rx_delay) |
+ RK3366_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3366_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3366_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_PHY_INTF_SEL_RMII | RK3366_GMAC_RMII_MODE);
+}
+
+static void rk3366_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3366_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_RMII_CLK_2_5M |
+ RK3366_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3366_GRF_SOC_CON6,
+ RK3366_GMAC_RMII_CLK_25M |
+ RK3366_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3366_ops = {
+ .set_to_rgmii = rk3366_set_to_rgmii,
+ .set_to_rmii = rk3366_set_to_rmii,
+ .set_rgmii_speed = rk3366_set_rgmii_speed,
+ .set_rmii_speed = rk3366_set_rmii_speed,
+};
+
+#define RK3368_GRF_SOC_CON15 0x043c
+#define RK3368_GRF_SOC_CON16 0x0440
+
+/* RK3368_GRF_SOC_CON15 */
+#define RK3368_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3368_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3368_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3368_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3368_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3368_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3368_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3368_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3368_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3368_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3368_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3368_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3368_GRF_SOC_CON16 */
+#define RK3368_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3368_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3368_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3368_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3368_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3368_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3368_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_PHY_INTF_SEL_RGMII |
+ RK3368_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON16,
+ DELAY_ENABLE(RK3368, tx_delay, rx_delay) |
+ RK3368_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3368_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3368_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_PHY_INTF_SEL_RMII | RK3368_GMAC_RMII_MODE);
+}
+
+static void rk3368_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3368_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_RMII_CLK_2_5M |
+ RK3368_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3368_GRF_SOC_CON15,
+ RK3368_GMAC_RMII_CLK_25M |
+ RK3368_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3368_ops = {
+ .set_to_rgmii = rk3368_set_to_rgmii,
+ .set_to_rmii = rk3368_set_to_rmii,
+ .set_rgmii_speed = rk3368_set_rgmii_speed,
+ .set_rmii_speed = rk3368_set_rmii_speed,
+};
+
+#define RK3399_GRF_SOC_CON5 0xc214
+#define RK3399_GRF_SOC_CON6 0xc218
+
+/* RK3399_GRF_SOC_CON5 */
+#define RK3399_GMAC_PHY_INTF_SEL_RGMII (GRF_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_CLR_BIT(11))
+#define RK3399_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(9) | GRF_CLR_BIT(10) | \
+ GRF_BIT(11))
+#define RK3399_GMAC_FLOW_CTRL GRF_BIT(8)
+#define RK3399_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(8)
+#define RK3399_GMAC_SPEED_10M GRF_CLR_BIT(7)
+#define RK3399_GMAC_SPEED_100M GRF_BIT(7)
+#define RK3399_GMAC_RMII_CLK_25M GRF_BIT(3)
+#define RK3399_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(3)
+#define RK3399_GMAC_CLK_125M (GRF_CLR_BIT(4) | GRF_CLR_BIT(5))
+#define RK3399_GMAC_CLK_25M (GRF_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_CLK_2_5M (GRF_CLR_BIT(4) | GRF_BIT(5))
+#define RK3399_GMAC_RMII_MODE GRF_BIT(6)
+#define RK3399_GMAC_RMII_MODE_CLR GRF_CLR_BIT(6)
+
+/* RK3399_GRF_SOC_CON6 */
+#define RK3399_GMAC_TXCLK_DLY_ENABLE GRF_BIT(7)
+#define RK3399_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(7)
+#define RK3399_GMAC_RXCLK_DLY_ENABLE GRF_BIT(15)
+#define RK3399_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(15)
+#define RK3399_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3399_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3399_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL_RGMII |
+ RK3399_GMAC_RMII_MODE_CLR);
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON6,
+ DELAY_ENABLE(RK3399, tx_delay, rx_delay) |
+ RK3399_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3399_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3399_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_PHY_INTF_SEL_RMII | RK3399_GMAC_RMII_MODE);
+}
+
+static void rk3399_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_2_5M);
+ else if (speed == 100)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_25M);
+ else if (speed == 1000)
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_CLK_125M);
+ else
+ dev_err(dev, "unknown speed value for RGMII! speed=%d", speed);
+}
+
+static void rk3399_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_RMII_CLK_2_5M |
+ RK3399_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RK3399_GRF_SOC_CON5,
+ RK3399_GMAC_RMII_CLK_25M |
+ RK3399_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rk3399_ops = {
+ .set_to_rgmii = rk3399_set_to_rgmii,
+ .set_to_rmii = rk3399_set_to_rmii,
+ .set_rgmii_speed = rk3399_set_rgmii_speed,
+ .set_rmii_speed = rk3399_set_rmii_speed,
+};
+
+#define RK3568_GRF_GMAC0_CON0 0x0380
+#define RK3568_GRF_GMAC0_CON1 0x0384
+#define RK3568_GRF_GMAC1_CON0 0x0388
+#define RK3568_GRF_GMAC1_CON1 0x038c
+
+/* RK3568_GRF_GMAC0_CON1 && RK3568_GRF_GMAC1_CON1 */
+#define RK3568_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RK3568_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RK3568_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RK3568_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RK3568_GMAC_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RK3568_GMAC_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+#define RK3568_GMAC_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RK3568_GMAC_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+
+/* RK3568_GRF_GMAC0_CON0 && RK3568_GRF_GMAC1_CON0 */
+#define RK3568_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RK3568_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rk3568_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 con0, con1;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ con0 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON0 :
+ RK3568_GRF_GMAC0_CON0;
+ con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
+ RK3568_GRF_GMAC0_CON1;
+
+ regmap_write(bsp_priv->grf, con0,
+ RK3568_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3568_GMAC_CLK_TX_DL_CFG(tx_delay));
+
+ regmap_write(bsp_priv->grf, con1,
+ RK3568_GMAC_PHY_INTF_SEL_RGMII |
+ RK3568_GMAC_RXCLK_DLY_ENABLE |
+ RK3568_GMAC_TXCLK_DLY_ENABLE);
+}
+
+static void rk3568_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 con1;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ con1 = (bsp_priv->id == 1) ? RK3568_GRF_GMAC1_CON1 :
+ RK3568_GRF_GMAC0_CON1;
+ regmap_write(bsp_priv->grf, con1, RK3568_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rk3568_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ case 1000:
+ rate = 125000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static const struct rk_gmac_ops rk3568_ops = {
+ .set_to_rgmii = rk3568_set_to_rgmii,
+ .set_to_rmii = rk3568_set_to_rmii,
+ .set_rgmii_speed = rk3568_set_gmac_speed,
+ .set_rmii_speed = rk3568_set_gmac_speed,
+ .regs_valid = true,
+ .regs = {
+ 0xfe2a0000, /* gmac0 */
+ 0xfe010000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
+/* sys_grf */
+#define RK3588_GRF_GMAC_CON7 0x031c
+#define RK3588_GRF_GMAC_CON8 0x0320
+#define RK3588_GRF_GMAC_CON9 0x0324
+
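+/* The RK3588 has two GMAC controllers sharing these registers; the macros
+ * below derive each field's bit position from the controller id (0 or 1).
+ */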
+#define RK3588_GMAC_RXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 3)
+#define RK3588_GMAC_RXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 3)
+#define RK3588_GMAC_TXCLK_DLY_ENABLE(id) GRF_BIT(2 * (id) + 2)
+#define RK3588_GMAC_TXCLK_DLY_DISABLE(id) GRF_CLR_BIT(2 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 8)
+#define RK3588_GMAC_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0xFF, 0)
+
+/* php_grf */
+#define RK3588_GRF_GMAC_CON0 0x0008
+#define RK3588_GRF_CLK_CON1 0x0070
+
+#define RK3588_GMAC_PHY_INTF_SEL_RGMII(id) \
+ (GRF_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_CLR_BIT(5 + (id) * 6))
+#define RK3588_GMAC_PHY_INTF_SEL_RMII(id) \
+ (GRF_CLR_BIT(3 + (id) * 6) | GRF_CLR_BIT(4 + (id) * 6) | GRF_BIT(5 + (id) * 6))
+
+#define RK3588_GMAC_CLK_RMII_MODE(id) GRF_BIT(5 * (id))
+#define RK3588_GMAC_CLK_RGMII_MODE(id) GRF_CLR_BIT(5 * (id))
+
+#define RK3588_GMAC_CLK_SELECT_CRU(id) GRF_BIT(5 * (id) + 4)
+#define RK3588_GMAC_CLK_SELECT_IO(id) GRF_CLR_BIT(5 * (id) + 4)
+
+#define RK3588_GMAC_CLK_RMII_DIV2(id) GRF_BIT(5 * (id) + 2)
+#define RK3588_GMAC_CLK_RMII_DIV20(id) GRF_CLR_BIT(5 * (id) + 2)
+
+#define RK3588_GMAC_CLK_RGMII_DIV1(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_CLR_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV5(id) \
+ (GRF_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+#define RK3588_GMAC_CLK_RGMII_DIV50(id) \
+ (GRF_CLR_BIT(5 * (id) + 2) | GRF_BIT(5 * (id) + 3))
+
+#define RK3588_GMAC_CLK_RMII_GATE(id) GRF_BIT(5 * (id) + 1)
+#define RK3588_GMAC_CLK_RMII_NOGATE(id) GRF_CLR_BIT(5 * (id) + 1)
+
+static void rk3588_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ u32 offset_con, id = bsp_priv->id;
+
+ if (IS_ERR(bsp_priv->grf) || IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "Missing rockchip,grf or rockchip,php_grf property\n");
+ return;
+ }
+
+ offset_con = bsp_priv->id == 1 ? RK3588_GRF_GMAC_CON9 :
+ RK3588_GRF_GMAC_CON8;
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RGMII(id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RGMII_MODE(id));
+
+ regmap_write(bsp_priv->grf, RK3588_GRF_GMAC_CON7,
+ RK3588_GMAC_RXCLK_DLY_ENABLE(id) |
+ RK3588_GMAC_TXCLK_DLY_ENABLE(id));
+
+ regmap_write(bsp_priv->grf, offset_con,
+ RK3588_GMAC_CLK_RX_DL_CFG(rx_delay) |
+ RK3588_GMAC_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rk3588_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->php_grf)) {
+ dev_err(dev, "%s: Missing rockchip,php_grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_GMAC_CON0,
+ RK3588_GMAC_PHY_INTF_SEL_RMII(bsp_priv->id));
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1,
+ RK3588_GMAC_CLK_RMII_MODE(bsp_priv->id));
+}
+
+static void rk3588_set_gmac_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned int val = 0, id = bsp_priv->id;
+
+ switch (speed) {
+ case 10:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMAC_CLK_RMII_DIV20(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV50(id);
+ break;
+ case 100:
+ if (bsp_priv->phy_iface == PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMAC_CLK_RMII_DIV2(id);
+ else
+ val = RK3588_GMAC_CLK_RGMII_DIV5(id);
+ break;
+ case 1000:
+ if (bsp_priv->phy_iface != PHY_INTERFACE_MODE_RMII)
+ val = RK3588_GMAC_CLK_RGMII_DIV1(id);
+ else
+ goto err;
+ break;
+ default:
+ goto err;
+ }
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+
+ return;
+err:
+ dev_err(dev, "unknown speed value for GMAC speed=%d", speed);
+}
+
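+/* Choose whether the MAC clock is taken from an external IO pad or generated
+ * internally by the CRU, and gate or ungate it accordingly.
+ */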
+static void rk3588_set_clock_selection(struct rk_priv_data *bsp_priv, bool input,
+ bool enable)
+{
+ unsigned int val = input ? RK3588_GMAC_CLK_SELECT_IO(bsp_priv->id) :
+ RK3588_GMAC_CLK_SELECT_CRU(bsp_priv->id);
+
+ val |= enable ? RK3588_GMAC_CLK_RMII_NOGATE(bsp_priv->id) :
+ RK3588_GMAC_CLK_RMII_GATE(bsp_priv->id);
+
+ regmap_write(bsp_priv->php_grf, RK3588_GRF_CLK_CON1, val);
+}
+
+static const struct rk_gmac_ops rk3588_ops = {
+ .set_to_rgmii = rk3588_set_to_rgmii,
+ .set_to_rmii = rk3588_set_to_rmii,
+ .set_rgmii_speed = rk3588_set_gmac_speed,
+ .set_rmii_speed = rk3588_set_gmac_speed,
+ .set_clock_selection = rk3588_set_clock_selection,
+ .regs_valid = true,
+ .regs = {
+ 0xfe1b0000, /* gmac0 */
+ 0xfe1c0000, /* gmac1 */
+ 0x0, /* sentinel */
+ },
+};
+
+#define RV1108_GRF_GMAC_CON0 0x0900
+
+/* RV1108_GRF_GMAC_CON0 */
+#define RV1108_GMAC_PHY_INTF_SEL_RMII (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | \
+ GRF_BIT(6))
+#define RV1108_GMAC_FLOW_CTRL GRF_BIT(3)
+#define RV1108_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(3)
+#define RV1108_GMAC_SPEED_10M GRF_CLR_BIT(2)
+#define RV1108_GMAC_SPEED_100M GRF_BIT(2)
+#define RV1108_GMAC_RMII_CLK_25M GRF_BIT(7)
+#define RV1108_GMAC_RMII_CLK_2_5M GRF_CLR_BIT(7)
+
+static void rv1108_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
+ RV1108_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rv1108_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ if (speed == 10) {
+ regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
+ RV1108_GMAC_RMII_CLK_2_5M |
+ RV1108_GMAC_SPEED_10M);
+ } else if (speed == 100) {
+ regmap_write(bsp_priv->grf, RV1108_GRF_GMAC_CON0,
+ RV1108_GMAC_RMII_CLK_25M |
+ RV1108_GMAC_SPEED_100M);
+ } else {
+ dev_err(dev, "unknown speed value for RMII! speed=%d", speed);
+ }
+}
+
+static const struct rk_gmac_ops rv1108_ops = {
+ .set_to_rmii = rv1108_set_to_rmii,
+ .set_rmii_speed = rv1108_set_rmii_speed,
+};
+
+#define RV1126_GRF_GMAC_CON0 0x0070
+#define RV1126_GRF_GMAC_CON1 0x0074
+#define RV1126_GRF_GMAC_CON2 0x0078
+
+/* RV1126_GRF_GMAC_CON0 */
+#define RV1126_GMAC_PHY_INTF_SEL_RGMII \
+ (GRF_BIT(4) | GRF_CLR_BIT(5) | GRF_CLR_BIT(6))
+#define RV1126_GMAC_PHY_INTF_SEL_RMII \
+ (GRF_CLR_BIT(4) | GRF_CLR_BIT(5) | GRF_BIT(6))
+#define RV1126_GMAC_FLOW_CTRL GRF_BIT(7)
+#define RV1126_GMAC_FLOW_CTRL_CLR GRF_CLR_BIT(7)
+#define RV1126_GMAC_M0_RXCLK_DLY_ENABLE GRF_BIT(1)
+#define RV1126_GMAC_M0_RXCLK_DLY_DISABLE GRF_CLR_BIT(1)
+#define RV1126_GMAC_M0_TXCLK_DLY_ENABLE GRF_BIT(0)
+#define RV1126_GMAC_M0_TXCLK_DLY_DISABLE GRF_CLR_BIT(0)
+#define RV1126_GMAC_M1_RXCLK_DLY_ENABLE GRF_BIT(3)
+#define RV1126_GMAC_M1_RXCLK_DLY_DISABLE GRF_CLR_BIT(3)
+#define RV1126_GMAC_M1_TXCLK_DLY_ENABLE GRF_BIT(2)
+#define RV1126_GMAC_M1_TXCLK_DLY_DISABLE GRF_CLR_BIT(2)
+
+/* RV1126_GRF_GMAC_CON1 */
+#define RV1126_GMAC_M0_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M0_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+/* RV1126_GRF_GMAC_CON2 */
+#define RV1126_GMAC_M1_CLK_RX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 8)
+#define RV1126_GMAC_M1_CLK_TX_DL_CFG(val) HIWORD_UPDATE(val, 0x7F, 0)
+
+static void rv1126_set_to_rgmii(struct rk_priv_data *bsp_priv,
+ int tx_delay, int rx_delay)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "Missing rockchip,grf property\n");
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RGMII |
+ RV1126_GMAC_M0_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M0_TXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_RXCLK_DLY_ENABLE |
+ RV1126_GMAC_M1_TXCLK_DLY_ENABLE);
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON1,
+ RV1126_GMAC_M0_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M0_CLK_TX_DL_CFG(tx_delay));
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON2,
+ RV1126_GMAC_M1_CLK_RX_DL_CFG(rx_delay) |
+ RV1126_GMAC_M1_CLK_TX_DL_CFG(tx_delay));
+}
+
+static void rv1126_set_to_rmii(struct rk_priv_data *bsp_priv)
+{
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (IS_ERR(bsp_priv->grf)) {
+ dev_err(dev, "%s: Missing rockchip,grf property\n", __func__);
+ return;
+ }
+
+ regmap_write(bsp_priv->grf, RV1126_GRF_GMAC_CON0,
+ RV1126_GMAC_PHY_INTF_SEL_RMII);
+}
+
+static void rv1126_set_rgmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ case 1000:
+ rate = 125000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static void rv1126_set_rmii_speed(struct rk_priv_data *bsp_priv, int speed)
+{
+ struct clk *clk_mac_speed = bsp_priv->clks[RK_CLK_MAC_SPEED].clk;
+ struct device *dev = &bsp_priv->pdev->dev;
+ unsigned long rate;
+ int ret;
+
+ switch (speed) {
+ case 10:
+ rate = 2500000;
+ break;
+ case 100:
+ rate = 25000000;
+ break;
+ default:
+ dev_err(dev, "unknown speed value for RGMII speed=%d", speed);
+ return;
+ }
+
+ ret = clk_set_rate(clk_mac_speed, rate);
+ if (ret)
+ dev_err(dev, "%s: set clk_mac_speed rate %ld failed %d\n",
+ __func__, rate, ret);
+}
+
+static const struct rk_gmac_ops rv1126_ops = {
+ .set_to_rgmii = rv1126_set_to_rgmii,
+ .set_to_rmii = rv1126_set_to_rmii,
+ .set_rgmii_speed = rv1126_set_rgmii_speed,
+ .set_rmii_speed = rv1126_set_rmii_speed,
+};
+
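+/* GRF registers for the integrated MAC PHY found on SoCs such as the RK3228
+ * and RK3328.
+ */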
+#define RK_GRF_MACPHY_CON0 0xb00
+#define RK_GRF_MACPHY_CON1 0xb04
+#define RK_GRF_MACPHY_CON2 0xb08
+#define RK_GRF_MACPHY_CON3 0xb0c
+
+#define RK_MACPHY_ENABLE GRF_BIT(0)
+#define RK_MACPHY_DISABLE GRF_CLR_BIT(0)
+#define RK_MACPHY_CFG_CLK_50M GRF_BIT(14)
+#define RK_GMAC2PHY_RMII_MODE (GRF_BIT(6) | GRF_CLR_BIT(7))
+#define RK_GRF_CON2_MACPHY_ID HIWORD_UPDATE(0x1234, 0xffff, 0)
+#define RK_GRF_CON3_MACPHY_ID HIWORD_UPDATE(0x35, 0x3f, 0)
+
+static void rk_gmac_integrated_phy_powerup(struct rk_priv_data *priv)
+{
+ if (priv->ops->integrated_phy_powerup)
+ priv->ops->integrated_phy_powerup(priv);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_CFG_CLK_50M);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_GMAC2PHY_RMII_MODE);
+
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON2, RK_GRF_CON2_MACPHY_ID);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON3, RK_GRF_CON3_MACPHY_ID);
+
+ if (priv->phy_reset) {
+ /* PHY needs to be disabled before trying to reset it */
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ reset_control_assert(priv->phy_reset);
+ usleep_range(10, 20);
+ reset_control_deassert(priv->phy_reset);
+ usleep_range(10, 20);
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_ENABLE);
+ msleep(30);
+ }
+}
+
+static void rk_gmac_integrated_phy_powerdown(struct rk_priv_data *priv)
+{
+ regmap_write(priv->grf, RK_GRF_MACPHY_CON0, RK_MACPHY_DISABLE);
+ if (priv->phy_reset)
+ reset_control_assert(priv->phy_reset);
+}
+
+static int rk_gmac_clk_init(struct plat_stmmacenet_data *plat)
+{
+ struct rk_priv_data *bsp_priv = plat->bsp_priv;
+ struct device *dev = &bsp_priv->pdev->dev;
+ int phy_iface = bsp_priv->phy_iface;
+ int i, j, ret;
+
+ bsp_priv->clk_enabled = false;
+
+ bsp_priv->num_clks = ARRAY_SIZE(rk_clocks);
+ if (phy_iface == PHY_INTERFACE_MODE_RMII)
+ bsp_priv->num_clks += ARRAY_SIZE(rk_rmii_clocks);
+
+ bsp_priv->clks = devm_kcalloc(dev, bsp_priv->num_clks,
+ sizeof(*bsp_priv->clks), GFP_KERNEL);
+ if (!bsp_priv->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(rk_clocks); i++)
+ bsp_priv->clks[i].id = rk_clocks[i];
+
+ if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ for (j = 0; j < ARRAY_SIZE(rk_rmii_clocks); j++)
+ bsp_priv->clks[i++].id = rk_rmii_clocks[j];
+ }
+
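+ /* Not every SoC variant provides all of these clocks, so fetch
+ * them as optional and tolerate any that are missing.
+ */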
+ ret = devm_clk_bulk_get_optional(dev, bsp_priv->num_clks,
+ bsp_priv->clks);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get clocks\n");
+
+ /* "stmmaceth" will be enabled by the core */
+ bsp_priv->clk_mac = devm_clk_get(dev, "stmmaceth");
+ ret = PTR_ERR_OR_ZERO(bsp_priv->clk_mac);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot get stmmaceth clock\n");
+
+ if (bsp_priv->clock_input) {
+ dev_info(dev, "clock input from PHY\n");
+ } else if (phy_iface == PHY_INTERFACE_MODE_RMII) {
+ clk_set_rate(bsp_priv->clk_mac, 50000000);
+ }
+
+ if (plat->phy_node && bsp_priv->integrated_phy) {
+ bsp_priv->clk_phy = of_clk_get(plat->phy_node, 0);
+ ret = PTR_ERR_OR_ZERO(bsp_priv->clk_phy);
+ if (ret)
+ return dev_err_probe(dev, ret, "Cannot get PHY clock\n");
+ clk_set_rate(bsp_priv->clk_phy, 50000000);
+ }
+
+ return 0;
+}
+
+static int gmac_clk_enable(struct rk_priv_data *bsp_priv, bool enable)
+{
+ int ret;
+
+ if (enable) {
+ if (!bsp_priv->clk_enabled) {
+ ret = clk_bulk_prepare_enable(bsp_priv->num_clks,
+ bsp_priv->clks);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(bsp_priv->clk_phy);
+ if (ret)
+ return ret;
+
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, true);
+
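+ /* Allow the newly enabled clocks to settle before the
+ * MAC starts using them.
+ */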
+ mdelay(5);
+ bsp_priv->clk_enabled = true;
+ }
+ } else {
+ if (bsp_priv->clk_enabled) {
+ clk_bulk_disable_unprepare(bsp_priv->num_clks,
+ bsp_priv->clks);
+ clk_disable_unprepare(bsp_priv->clk_phy);
+
+ if (bsp_priv->ops && bsp_priv->ops->set_clock_selection)
+ bsp_priv->ops->set_clock_selection(bsp_priv,
+ bsp_priv->clock_input, false);
+
+ bsp_priv->clk_enabled = false;
+ }
+ }
+
+ return 0;
+}
+
+static int phy_power_on(struct rk_priv_data *bsp_priv, bool enable)
+{
+ struct regulator *ldo = bsp_priv->regulator;
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ if (enable) {
+ ret = regulator_enable(ldo);
+ if (ret)
+ dev_err(dev, "failed to enable phy-supply\n");
+ } else {
+ ret = regulator_disable(ldo);
+ if (ret)
+ dev_err(dev, "failed to disable phy-supply\n");
+ }
+
+ return ret;
+}
+
+static struct rk_priv_data *rk_gmac_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat,
+ const struct rk_gmac_ops *ops)
+{
+ struct rk_priv_data *bsp_priv;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int ret;
+ const char *strings = NULL;
+ int value;
+
+ bsp_priv = devm_kzalloc(dev, sizeof(*bsp_priv), GFP_KERNEL);
+ if (!bsp_priv)
+ return ERR_PTR(-ENOMEM);
+
+ of_get_phy_mode(dev->of_node, &bsp_priv->phy_iface);
+ bsp_priv->ops = ops;
+
+ /* Some SoCs have multiple MAC controllers, which need
+ * to be distinguished.
+ */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res && ops->regs_valid) {
+ int i = 0;
+
+ while (ops->regs[i]) {
+ if (ops->regs[i] == res->start) {
+ bsp_priv->id = i;
+ break;
+ }
+ i++;
+ }
+ }
+
+ bsp_priv->regulator = devm_regulator_get(dev, "phy");
+ if (IS_ERR(bsp_priv->regulator)) {
+ ret = PTR_ERR(bsp_priv->regulator);
+ dev_err_probe(dev, ret, "failed to get phy regulator\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = of_property_read_string(dev->of_node, "clock_in_out", &strings);
+ if (ret) {
+ dev_err(dev, "Can not read property: clock_in_out.\n");
+ bsp_priv->clock_input = true;
+ } else {
+ dev_info(dev, "clock input or output? (%s).\n",
+ strings);
+ if (!strcmp(strings, "input"))
+ bsp_priv->clock_input = true;
+ else
+ bsp_priv->clock_input = false;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "tx_delay", &value);
+ if (ret) {
+ bsp_priv->tx_delay = 0x30;
+ dev_err(dev, "Can not read property: tx_delay.");
+ dev_err(dev, "set tx_delay to 0x%x\n",
+ bsp_priv->tx_delay);
+ } else {
+ dev_info(dev, "TX delay(0x%x).\n", value);
+ bsp_priv->tx_delay = value;
+ }
+
+ ret = of_property_read_u32(dev->of_node, "rx_delay", &value);
+ if (ret) {
+ bsp_priv->rx_delay = 0x10;
+ dev_err(dev, "Can not read property: rx_delay.");
+ dev_err(dev, "set rx_delay to 0x%x\n",
+ bsp_priv->rx_delay);
+ } else {
+ dev_info(dev, "RX delay(0x%x).\n", value);
+ bsp_priv->rx_delay = value;
+ }
+
+ bsp_priv->grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,grf");
+ bsp_priv->php_grf = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "rockchip,php-grf");
+
+ if (plat->phy_node) {
+ bsp_priv->integrated_phy = of_property_read_bool(plat->phy_node,
+ "phy-is-integrated");
+ if (bsp_priv->integrated_phy) {
+ bsp_priv->phy_reset = of_reset_control_get(plat->phy_node, NULL);
+ if (IS_ERR(bsp_priv->phy_reset)) {
+ dev_err(&pdev->dev, "No PHY reset control found.\n");
+ bsp_priv->phy_reset = NULL;
+ }
+ }
+ }
+ dev_info(dev, "integrated PHY? (%s).\n",
+ bsp_priv->integrated_phy ? "yes" : "no");
+
+ bsp_priv->pdev = pdev;
+
+ return bsp_priv;
+}
+
+static int rk_gmac_check_ops(struct rk_priv_data *bsp_priv)
+{
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (!bsp_priv->ops->set_to_rgmii)
+ return -EINVAL;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!bsp_priv->ops->set_to_rmii)
+ return -EINVAL;
+ break;
+ default:
+ dev_err(&bsp_priv->pdev->dev,
+ "unsupported interface %d", bsp_priv->phy_iface);
+ }
+ return 0;
+}
+
+static int rk_gmac_powerup(struct rk_priv_data *bsp_priv)
+{
+ int ret;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ ret = rk_gmac_check_ops(bsp_priv);
+ if (ret)
+ return ret;
+
+ ret = gmac_clk_enable(bsp_priv, true);
+ if (ret)
+ return ret;
+
+ /* RMII or RGMII */
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ dev_info(dev, "init for RGMII\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay,
+ bsp_priv->rx_delay);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ dev_info(dev, "init for RGMII_ID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, 0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ dev_info(dev, "init for RGMII_RXID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, bsp_priv->tx_delay, 0);
+ break;
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dev_info(dev, "init for RGMII_TXID\n");
+ bsp_priv->ops->set_to_rgmii(bsp_priv, 0, bsp_priv->rx_delay);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ dev_info(dev, "init for RMII\n");
+ bsp_priv->ops->set_to_rmii(bsp_priv);
+ break;
+ default:
+ dev_err(dev, "NO interface defined!\n");
+ }
+
+ ret = phy_power_on(bsp_priv, true);
+ if (ret) {
+ gmac_clk_enable(bsp_priv, false);
+ return ret;
+ }
+
+ pm_runtime_get_sync(dev);
+
+ if (bsp_priv->integrated_phy)
+ rk_gmac_integrated_phy_powerup(bsp_priv);
+
+ return 0;
+}
+
+static void rk_gmac_powerdown(struct rk_priv_data *gmac)
+{
+ if (gmac->integrated_phy)
+ rk_gmac_integrated_phy_powerdown(gmac);
+
+ pm_runtime_put_sync(&gmac->pdev->dev);
+
+ phy_power_on(gmac, false);
+ gmac_clk_enable(gmac, false);
+}
+
+static void rk_fix_speed(void *priv, unsigned int speed)
+{
+ struct rk_priv_data *bsp_priv = priv;
+ struct device *dev = &bsp_priv->pdev->dev;
+
+ switch (bsp_priv->phy_iface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (bsp_priv->ops->set_rgmii_speed)
+ bsp_priv->ops->set_rgmii_speed(bsp_priv, speed);
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (bsp_priv->ops->set_rmii_speed)
+ bsp_priv->ops->set_rmii_speed(bsp_priv, speed);
+ break;
+ default:
+ dev_err(dev, "unsupported interface %d", bsp_priv->phy_iface);
+ }
+}
+
+static int rk_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ const struct rk_gmac_ops *data;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ /* If the stmmac is not already selected as gmac4,
+ * then make sure we fallback to gmac.
+ */
+ if (!plat_dat->has_gmac4)
+ plat_dat->has_gmac = true;
+ plat_dat->fix_mac_speed = rk_fix_speed;
+
+ plat_dat->bsp_priv = rk_gmac_setup(pdev, plat_dat, data);
+ if (IS_ERR(plat_dat->bsp_priv)) {
+ ret = PTR_ERR(plat_dat->bsp_priv);
+ goto err_remove_config_dt;
+ }
+
+ ret = rk_gmac_clk_init(plat_dat);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = rk_gmac_powerup(plat_dat->bsp_priv);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_gmac_powerdown;
+
+ return 0;
+
+err_gmac_powerdown:
+ rk_gmac_powerdown(plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int rk_gmac_remove(struct platform_device *pdev)
+{
+ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(&pdev->dev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ rk_gmac_powerdown(bsp_priv);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rk_gmac_suspend(struct device *dev)
+{
+ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
+ int ret = stmmac_suspend(dev);
+
+ /* Keep the PHY up if we use Wake-on-LAN. */
+ if (!device_may_wakeup(dev)) {
+ rk_gmac_powerdown(bsp_priv);
+ bsp_priv->suspended = true;
+ }
+
+ return ret;
+}
+
+static int rk_gmac_resume(struct device *dev)
+{
+ struct rk_priv_data *bsp_priv = get_stmmac_bsp_priv(dev);
+
+ /* The PHY was up for Wake-on-LAN. */
+ if (bsp_priv->suspended) {
+ rk_gmac_powerup(bsp_priv);
+ bsp_priv->suspended = false;
+ }
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(rk_gmac_pm_ops, rk_gmac_suspend, rk_gmac_resume);
+
+static const struct of_device_id rk_gmac_dwmac_match[] = {
+ { .compatible = "rockchip,px30-gmac", .data = &px30_ops },
+ { .compatible = "rockchip,rk3128-gmac", .data = &rk3128_ops },
+ { .compatible = "rockchip,rk3228-gmac", .data = &rk3228_ops },
+ { .compatible = "rockchip,rk3288-gmac", .data = &rk3288_ops },
+ { .compatible = "rockchip,rk3308-gmac", .data = &rk3308_ops },
+ { .compatible = "rockchip,rk3328-gmac", .data = &rk3328_ops },
+ { .compatible = "rockchip,rk3366-gmac", .data = &rk3366_ops },
+ { .compatible = "rockchip,rk3368-gmac", .data = &rk3368_ops },
+ { .compatible = "rockchip,rk3399-gmac", .data = &rk3399_ops },
+ { .compatible = "rockchip,rk3568-gmac", .data = &rk3568_ops },
+ { .compatible = "rockchip,rk3588-gmac", .data = &rk3588_ops },
+ { .compatible = "rockchip,rv1108-gmac", .data = &rv1108_ops },
+ { .compatible = "rockchip,rv1126-gmac", .data = &rv1126_ops },
+ { }
+};
+MODULE_DEVICE_TABLE(of, rk_gmac_dwmac_match);
+
+static struct platform_driver rk_gmac_dwmac_driver = {
+ .probe = rk_gmac_probe,
+ .remove = rk_gmac_remove,
+ .driver = {
+ .name = "rk_gmac-dwmac",
+ .pm = &rk_gmac_pm_ops,
+ .of_match_table = rk_gmac_dwmac_match,
+ },
+};
+module_platform_driver(rk_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
+MODULE_DESCRIPTION("Rockchip RK3288 DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
new file mode 100644
index 000000000..6b447d8f0
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -0,0 +1,536 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright Altera Corporation (C) 2014. All rights reserved.
+ *
+ * Adapted from dwmac-sti.c
+ */
+
+#include <linux/mfd/altera-sysmgr.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/reset.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#include "altr_tse_pcs.h"
+
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII 0x2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_WIDTH 2
+#define SYSMGR_EMACGRP_CTRL_PHYSEL_MASK 0x00000003
+#define SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000010
+#define SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK 0x00000100
+
+#define SYSMGR_FPGAGRP_MODULE_REG 0x00000028
+#define SYSMGR_FPGAGRP_MODULE_EMAC 0x00000004
+#define SYSMGR_FPGAINTF_EMAC_REG 0x00000070
+#define SYSMGR_FPGAINTF_EMAC_BIT 0x1
+
+#define EMAC_SPLITTER_CTRL_REG 0x0
+#define EMAC_SPLITTER_CTRL_SPEED_MASK 0x3
+#define EMAC_SPLITTER_CTRL_SPEED_10 0x2
+#define EMAC_SPLITTER_CTRL_SPEED_100 0x3
+#define EMAC_SPLITTER_CTRL_SPEED_1000 0x0
+
+struct socfpga_dwmac;
+struct socfpga_dwmac_ops {
+ int (*set_phy_mode)(struct socfpga_dwmac *dwmac_priv);
+};
+
+struct socfpga_dwmac {
+ u32 reg_offset;
+ u32 reg_shift;
+ struct device *dev;
+ struct regmap *sys_mgr_base_addr;
+ struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ocp_rst;
+ void __iomem *splitter_base;
+ bool f2h_ptp_ref_clk;
+ struct tse_pcs pcs;
+ const struct socfpga_dwmac_ops *ops;
+};
+
+static void socfpga_dwmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct socfpga_dwmac *dwmac = (struct socfpga_dwmac *)priv;
+ void __iomem *splitter_base = dwmac->splitter_base;
+ void __iomem *sgmii_adapter_base = dwmac->pcs.sgmii_adapter_base;
+ struct device *dev = dwmac->dev;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct phy_device *phy_dev = ndev->phydev;
+ u32 val;
+
+ if (sgmii_adapter_base)
+ writew(SGMII_ADAPTER_DISABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+
+ if (splitter_base) {
+ val = readl(splitter_base + EMAC_SPLITTER_CTRL_REG);
+ val &= ~EMAC_SPLITTER_CTRL_SPEED_MASK;
+
+ switch (speed) {
+ case 1000:
+ val |= EMAC_SPLITTER_CTRL_SPEED_1000;
+ break;
+ case 100:
+ val |= EMAC_SPLITTER_CTRL_SPEED_100;
+ break;
+ case 10:
+ val |= EMAC_SPLITTER_CTRL_SPEED_10;
+ break;
+ default:
+ return;
+ }
+ writel(val, splitter_base + EMAC_SPLITTER_CTRL_REG);
+ }
+
+ if (phy_dev && sgmii_adapter_base) {
+ writew(SGMII_ADAPTER_ENABLE,
+ sgmii_adapter_base + SGMII_ADAPTER_CTRL_REG);
+ tse_pcs_fix_mac_speed(&dwmac->pcs, phy_dev, speed);
+ }
+}
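+
+/* The splitter control field written above is a pure speed -> 2-bit
+ * encoding; a minimal sketch of that mapping as a standalone helper
+ * (hypothetical, compiled out, not used by the driver):
+ */
+#if 0
+static u32 example_splitter_speed_bits(unsigned int speed)
+{
+ switch (speed) {
+ case 1000:
+ return EMAC_SPLITTER_CTRL_SPEED_1000; /* 0x0 */
+ case 100:
+ return EMAC_SPLITTER_CTRL_SPEED_100; /* 0x3 */
+ case 10:
+ return EMAC_SPLITTER_CTRL_SPEED_10; /* 0x2 */
+ default:
+ return ~0U; /* unsupported speed */
+ }
+}
+#endif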
+
+static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct regmap *sys_mgr_base_addr;
+ u32 reg_offset, reg_shift;
+ int ret, index;
+ struct device_node *np_splitter = NULL;
+ struct device_node *np_sgmii_adapter = NULL;
+ struct resource res_splitter;
+ struct resource res_tse_pcs;
+ struct resource res_sgmii_adapter;
+
+ sys_mgr_base_addr =
+ altr_sysmgr_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
+ if (IS_ERR(sys_mgr_base_addr)) {
+ dev_info(dev, "No sysmgr-syscon node found\n");
+ return PTR_ERR(sys_mgr_base_addr);
+ }
+
+ ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, &reg_offset);
+ if (ret) {
+ dev_info(dev, "Could not read reg_offset from sysmgr-syscon!\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, &reg_shift);
+ if (ret) {
+ dev_info(dev, "Could not read reg_shift from sysmgr-syscon!\n");
+ return -EINVAL;
+ }
+
+ dwmac->f2h_ptp_ref_clk = of_property_read_bool(np, "altr,f2h_ptp_ref_clk");
+
+ np_splitter = of_parse_phandle(np, "altr,emac-splitter", 0);
+ if (np_splitter) {
+ ret = of_address_to_resource(np_splitter, 0, &res_splitter);
+ of_node_put(np_splitter);
+ if (ret) {
+ dev_info(dev, "Missing emac splitter address\n");
+ return -EINVAL;
+ }
+
+ dwmac->splitter_base = devm_ioremap_resource(dev, &res_splitter);
+ if (IS_ERR(dwmac->splitter_base)) {
+ dev_info(dev, "Failed to map emac splitter\n");
+ return PTR_ERR(dwmac->splitter_base);
+ }
+ }
+
+ np_sgmii_adapter = of_parse_phandle(np,
+ "altr,gmii-to-sgmii-converter", 0);
+ if (np_sgmii_adapter) {
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "hps_emac_interface_splitter_avalon_slave");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_splitter)) {
+ dev_err(dev,
+ "%s: ERROR: missing emac splitter address\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->splitter_base =
+ devm_ioremap_resource(dev, &res_splitter);
+
+ if (IS_ERR(dwmac->splitter_base)) {
+ ret = PTR_ERR(dwmac->splitter_base);
+ goto err_node_put;
+ }
+ }
+
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "gmii_to_sgmii_adapter_avalon_slave");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_sgmii_adapter)) {
+ dev_err(dev,
+ "%s: ERROR: failed mapping adapter\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->pcs.sgmii_adapter_base =
+ devm_ioremap_resource(dev, &res_sgmii_adapter);
+
+ if (IS_ERR(dwmac->pcs.sgmii_adapter_base)) {
+ ret = PTR_ERR(dwmac->pcs.sgmii_adapter_base);
+ goto err_node_put;
+ }
+ }
+
+ index = of_property_match_string(np_sgmii_adapter, "reg-names",
+ "eth_tse_control_port");
+
+ if (index >= 0) {
+ if (of_address_to_resource(np_sgmii_adapter, index,
+ &res_tse_pcs)) {
+ dev_err(dev,
+ "%s: ERROR: failed mapping tse control port\n",
+ __func__);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ dwmac->pcs.tse_pcs_base =
+ devm_ioremap_resource(dev, &res_tse_pcs);
+
+ if (IS_ERR(dwmac->pcs.tse_pcs_base)) {
+ ret = PTR_ERR(dwmac->pcs.tse_pcs_base);
+ goto err_node_put;
+ }
+ }
+ }
+ dwmac->reg_offset = reg_offset;
+ dwmac->reg_shift = reg_shift;
+ dwmac->sys_mgr_base_addr = sys_mgr_base_addr;
+ dwmac->dev = dev;
+ of_node_put(np_sgmii_adapter);
+
+ return 0;
+
+err_node_put:
+ of_node_put(np_sgmii_adapter);
+ return ret;
+}
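+
+/* For reference, the "altr,sysmgr-syscon" property parsed above is a
+ * three-cell phandle, <&sysmgr reg_offset reg_shift>; a condensed sketch
+ * of the cell lookup (hypothetical helper, compiled out):
+ */
+#if 0
+static int example_sysmgr_cells(struct device_node *np, u32 *off, u32 *shift)
+{
+ if (of_property_read_u32_index(np, "altr,sysmgr-syscon", 1, off))
+ return -EINVAL;
+ if (of_property_read_u32_index(np, "altr,sysmgr-syscon", 2, shift))
+ return -EINVAL;
+ return 0;
+}
+#endif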
+
+static int socfpga_get_plat_phymode(struct socfpga_dwmac *dwmac)
+{
+ struct net_device *ndev = dev_get_drvdata(dwmac->dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return priv->plat->interface;
+}
+
+static int socfpga_set_phy_mode_common(int phymode, u32 *val)
+{
+ switch (phymode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ case PHY_INTERFACE_MODE_GMII:
+ case PHY_INTERFACE_MODE_SGMII:
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ *val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RMII;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
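+
+/* Usage sketch for the helper above (hypothetical, compiled out): the
+ * callers below translate the phy-mode from the device tree into the
+ * 2-bit PHYSEL field value before programming the System Manager.
+ */
+#if 0
+static int example_physel(void)
+{
+ u32 val;
+
+ if (socfpga_set_phy_mode_common(PHY_INTERFACE_MODE_RGMII, &val))
+ return -EINVAL;
+ /* val == SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII (0x1) */
+ return 0;
+}
+#endif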
+
+static int socfpga_gen5_set_phy_mode(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = socfpga_get_plat_phymode(dwmac);
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val, module;
+
+ if (socfpga_set_phy_mode_common(phymode, &val)) {
+ dev_err(dwmac->dev, "bad phy mode %d\n", phymode);
+ return -EINVAL;
+ }
+
+ /* Overwrite val with GMII if the splitter core is enabled. The
+ * phymode here is the actual PHY mode on the PHY hardware, but the
+ * PHY interface from the EMAC core is GMII.
+ */
+ if (dwmac->splitter_base)
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
+ /* Assert reset to the enet controller before changing the phy mode */
+ reset_control_assert(dwmac->stmmac_ocp_rst);
+ reset_control_assert(dwmac->stmmac_rst);
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
+ ctrl |= val << reg_shift;
+
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ &module);
+ module |= (SYSMGR_FPGAGRP_MODULE_EMAC << (reg_shift / 2));
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAGRP_MODULE_REG,
+ module);
+ }
+
+ if (dwmac->f2h_ptp_ref_clk)
+ ctrl |= SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK << (reg_shift / 2);
+ else
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PTP_REF_CLK_MASK <<
+ (reg_shift / 2));
+
+ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
+ /* Deassert reset for the phy configuration to be sampled by
+ * the enet controller, and operation to start in requested mode
+ */
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+ reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int socfpga_gen10_set_phy_mode(struct socfpga_dwmac *dwmac)
+{
+ struct regmap *sys_mgr_base_addr = dwmac->sys_mgr_base_addr;
+ int phymode = socfpga_get_plat_phymode(dwmac);
+ u32 reg_offset = dwmac->reg_offset;
+ u32 reg_shift = dwmac->reg_shift;
+ u32 ctrl, val, module;
+
+ if (socfpga_set_phy_mode_common(phymode, &val))
+ return -EINVAL;
+
+ /* Overwrite val with GMII if the splitter core is enabled. The
+ * phymode here is the actual PHY mode on the PHY hardware, but the
+ * PHY interface from the EMAC core is GMII.
+ */
+ if (dwmac->splitter_base)
+ val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
+
+ /* Assert reset to the enet controller before changing the phy mode */
+ reset_control_assert(dwmac->stmmac_ocp_rst);
+ reset_control_assert(dwmac->stmmac_rst);
+
+ regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
+ ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK);
+ ctrl |= val;
+
+ if (dwmac->f2h_ptp_ref_clk ||
+ phymode == PHY_INTERFACE_MODE_MII ||
+ phymode == PHY_INTERFACE_MODE_GMII ||
+ phymode == PHY_INTERFACE_MODE_SGMII) {
+ ctrl |= SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK;
+ regmap_read(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG,
+ &module);
+ module |= (SYSMGR_FPGAINTF_EMAC_BIT << reg_shift);
+ regmap_write(sys_mgr_base_addr, SYSMGR_FPGAINTF_EMAC_REG,
+ module);
+ } else {
+ ctrl &= ~SYSMGR_GEN10_EMACGRP_CTRL_PTP_REF_CLK_MASK;
+ }
+
+ regmap_write(sys_mgr_base_addr, reg_offset, ctrl);
+
+ /* Deassert reset for the phy configuration to be sampled by
+ * the enet controller, and operation to start in requested mode
+ */
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+ reset_control_deassert(dwmac->stmmac_rst);
+ if (phymode == PHY_INTERFACE_MODE_SGMII) {
+ if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
+ dev_err(dwmac->dev, "Unable to initialize TSE PCS\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+static int socfpga_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct device *dev = &pdev->dev;
+ int ret;
+ struct socfpga_dwmac *dwmac;
+ struct net_device *ndev;
+ struct stmmac_priv *stpriv;
+ const struct socfpga_dwmac_ops *ops;
+
+ ops = device_get_match_data(&pdev->dev);
+ if (!ops) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+ if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+ ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+ dev_err(dev, "error getting reset control of ocp %d\n", ret);
+ goto err_remove_config_dt;
+ }
+
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+
+ ret = socfpga_dwmac_parse_data(dwmac, dev);
+ if (ret) {
+ dev_err(dev, "Unable to parse OF data\n");
+ goto err_remove_config_dt;
+ }
+
+ dwmac->ops = ops;
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ndev = platform_get_drvdata(pdev);
+ stpriv = netdev_priv(ndev);
+
+ /* The socfpga driver needs to control the stmmac reset to set the phy
+ * mode. Create a copy of the core reset handle so it can be used by
+ * the driver later.
+ */
+ dwmac->stmmac_rst = stpriv->plat->stmmac_rst;
+
+ ret = ops->set_phy_mode(dwmac);
+ if (ret)
+ goto err_dvr_remove;
+
+ return 0;
+
+err_dvr_remove:
+ stmmac_dvr_remove(&pdev->dev);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int socfpga_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct socfpga_dwmac *dwmac_priv = get_stmmac_bsp_priv(dev);
+
+ dwmac_priv->ops->set_phy_mode(priv->plat->bsp_priv);
+
+ /* Before the enet controller is suspended, the phy is suspended.
+ * This causes the phy clock to be gated. The enet controller is
+ * resumed before the phy, so the clock is still gated "off" when
+ * the enet controller is resumed. This code makes sure the phy
+ * is "resumed" before reinitializing the enet controller since
+ * the enet controller depends on an active phy clock to complete
+ * a DMA reset. A DMA reset will "time out" if executed
+ * with no phy clock input on the Synopsys enet controller.
+ * Verified through Synopsys Case #8000711656.
+ *
+ * Note that the phy clock is also gated when the phy is isolated.
+ * Phy "suspend" and "isolate" controls are located in phy basic
+ * control register 0, and can be modified by the phy driver
+ * framework.
+ */
+ if (ndev->phydev)
+ phy_resume(ndev->phydev);
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int __maybe_unused socfpga_dwmac_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_bus_clks_config(priv, false);
+
+ return 0;
+}
+
+static int __maybe_unused socfpga_dwmac_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return stmmac_bus_clks_config(priv, true);
+}
+
+static const struct dev_pm_ops socfpga_dwmac_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_suspend, socfpga_dwmac_resume)
+ SET_RUNTIME_PM_OPS(socfpga_dwmac_runtime_suspend, socfpga_dwmac_runtime_resume, NULL)
+};
+
+static const struct socfpga_dwmac_ops socfpga_gen5_ops = {
+ .set_phy_mode = socfpga_gen5_set_phy_mode,
+};
+
+static const struct socfpga_dwmac_ops socfpga_gen10_ops = {
+ .set_phy_mode = socfpga_gen10_set_phy_mode,
+};
+
+static const struct of_device_id socfpga_dwmac_match[] = {
+ { .compatible = "altr,socfpga-stmmac", .data = &socfpga_gen5_ops },
+ { .compatible = "altr,socfpga-stmmac-a10-s10", .data = &socfpga_gen10_ops },
+ { }
+};
+MODULE_DEVICE_TABLE(of, socfpga_dwmac_match);
+
+static struct platform_driver socfpga_dwmac_driver = {
+ .probe = socfpga_dwmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "socfpga-dwmac",
+ .pm = &socfpga_dwmac_pm_ops,
+ .of_match_table = socfpga_dwmac_match,
+ },
+};
+module_platform_driver(socfpga_dwmac_driver);
+
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000..710d74357
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,436 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ * Contributors: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+
+#include "stmmac_platform.h"
+
+#define DWMAC_125MHZ 125000000
+#define DWMAC_50MHZ 50000000
+#define DWMAC_25MHZ 25000000
+#define DWMAC_2_5MHZ 2500000
+
+#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
+ iface == PHY_INTERFACE_MODE_RGMII_ID || \
+ iface == PHY_INTERFACE_MODE_RGMII_RXID || \
+ iface == PHY_INTERFACE_MODE_RGMII_TXID)
+
+#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+ iface == PHY_INTERFACE_MODE_GMII)
+
+/* STiH4xx register definitions (STiH415/STiH416/STiH407/STiH410 families)
+ *
+ * The table below summarizes the clock requirements and clock sources
+ * for the supported PHY interface modes and link speeds.
+ * ________________________________________________
+ *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
+ * ------------------------------------------------
+ *| MII | n/a | 25MHz |
+ *| | | txclk |
+ * ------------------------------------------------
+ *| GMII | 125MHz | 25MHz |
+ *| | clk-125/txclk | txclk |
+ * ------------------------------------------------
+ *| RGMII | 125MHz | 25MHz |
+ *| | clk-125/txclk | clkgen |
+ *| | clkgen | |
+ * ------------------------------------------------
+ *| RMII | n/a | 25MHz |
+ *| | |clkgen/phyclk-in |
+ * ------------------------------------------------
+ *
+ * Register Configuration
+ *-------------------------------
+ * src |BIT(8)| BIT(7)| BIT(6)|
+ *-------------------------------
+ * txclk | 0 | n/a | 1 |
+ *-------------------------------
+ * ck_125| 0 | n/a | 0 |
+ *-------------------------------
+ * phyclk| 1 | 0 | n/a |
+ *-------------------------------
+ * clkgen| 1 | 1 | n/a |
+ *-------------------------------
+ */
+
+#define STIH4XX_RETIME_SRC_MASK GENMASK(8, 6)
+#define STIH4XX_ETH_SEL_TX_RETIME_CLK BIT(8)
+#define STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
+#define STIH4XX_ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
+
+/* STiD127 register definitions
+ *-----------------------
+ * src |BIT(6)| BIT(7)|
+ *-----------------------
+ * MII | 1 | n/a |
+ *-----------------------
+ * RMII | n/a | 1 |
+ * clkgen| | |
+ *-----------------------
+ * RMII | n/a | 0 |
+ * phyclk| | |
+ *-----------------------
+ * RGMII | 1 | n/a |
+ * clkgen| | |
+ *-----------------------
+ */
+
+#define STID127_RETIME_SRC_MASK GENMASK(7, 6)
+#define STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
+#define STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK BIT(6)
+
+#define ENMII_MASK GENMASK(5, 5)
+#define ENMII BIT(5)
+#define EN_MASK GENMASK(1, 1)
+#define EN BIT(1)
+
+/*
+ * 3 bits [4:2]
+ * 000-GMII/MII
+ * 001-RGMII
+ * 010-SGMII
+ * 100-RMII
+ */
+#define MII_PHY_SEL_MASK GENMASK(4, 2)
+#define ETH_PHY_SEL_RMII BIT(4)
+#define ETH_PHY_SEL_SGMII BIT(3)
+#define ETH_PHY_SEL_RGMII BIT(2)
+#define ETH_PHY_SEL_GMII 0x0
+#define ETH_PHY_SEL_MII 0x0
+
+struct sti_dwmac {
+ phy_interface_t interface; /* MII interface */
+ bool ext_phyclk; /* Clock from external PHY */
+ u32 tx_retime_src; /* TXCLK retiming */
+ struct clk *clk; /* PHY clock */
+ u32 ctrl_reg; /* GMAC glue-logic control register */
+ int clk_sel_reg; /* GMAC ext clk selection register */
+ struct regmap *regmap;
+ bool gmac_en;
+ u32 speed;
+ void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+struct sti_dwmac_of_data {
+ void (*fix_retime_src)(void *priv, unsigned int speed);
+};
+
+static u32 phy_intf_sels[] = {
+ [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
+ [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
+ [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
+ [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+};
+
+enum {
+ TX_RETIME_SRC_NA = 0,
+ TX_RETIME_SRC_TXCLK = 1,
+ TX_RETIME_SRC_CLK_125,
+ TX_RETIME_SRC_PHYCLK,
+ TX_RETIME_SRC_CLKGEN,
+};
+
+static u32 stih4xx_tx_retime_val[] = {
+ [TX_RETIME_SRC_TXCLK] = STIH4XX_ETH_SEL_TXCLK_NOT_CLK125,
+ [TX_RETIME_SRC_CLK_125] = 0x0,
+ [TX_RETIME_SRC_PHYCLK] = STIH4XX_ETH_SEL_TX_RETIME_CLK,
+ [TX_RETIME_SRC_CLKGEN] = STIH4XX_ETH_SEL_TX_RETIME_CLK
+ | STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
+};
+
+static void stih4xx_fix_retime_src(void *priv, u32 spd)
+{
+ struct sti_dwmac *dwmac = priv;
+ u32 src = dwmac->tx_retime_src;
+ u32 reg = dwmac->ctrl_reg;
+ u32 freq = 0;
+
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ src = TX_RETIME_SRC_TXCLK;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ if (dwmac->ext_phyclk) {
+ src = TX_RETIME_SRC_PHYCLK;
+ } else {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = DWMAC_50MHZ;
+ }
+ } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ /* At gigabit speeds the clock source can be external or from clkgen */
+ if (spd == SPEED_1000) {
+ freq = DWMAC_125MHZ;
+ } else {
+ /* Switch to clkgen for these speeds */
+ src = TX_RETIME_SRC_CLKGEN;
+ if (spd == SPEED_100)
+ freq = DWMAC_25MHZ;
+ else if (spd == SPEED_10)
+ freq = DWMAC_2_5MHZ;
+ }
+ }
+
+ if (src == TX_RETIME_SRC_CLKGEN && freq)
+ clk_set_rate(dwmac->clk, freq);
+
+ regmap_update_bits(dwmac->regmap, reg, STIH4XX_RETIME_SRC_MASK,
+ stih4xx_tx_retime_val[src]);
+}
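+
+/* Worked example for the table above, assuming an RGMII link that has
+ * just negotiated 100 Mbit (hypothetical, compiled out): retiming
+ * switches to clkgen and the clock is reprogrammed to 25 MHz.
+ */
+#if 0
+static void example_rgmii_100(struct sti_dwmac *dwmac)
+{
+ dwmac->interface = PHY_INTERFACE_MODE_RGMII;
+ stih4xx_fix_retime_src(dwmac, SPEED_100);
+ /* -> clk_set_rate(dwmac->clk, DWMAC_25MHZ) plus
+ * STIH4XX_ETH_SEL_TX_RETIME_CLK |
+ * STIH4XX_ETH_SEL_INTERNAL_NOTEXT_PHYCLK in ctrl_reg */
+}
+#endif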
+
+static void stid127_fix_retime_src(void *priv, u32 spd)
+{
+ struct sti_dwmac *dwmac = priv;
+ u32 reg = dwmac->ctrl_reg;
+ u32 freq = 0;
+ u32 val = 0;
+
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII) {
+ val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ if (!dwmac->ext_phyclk) {
+ val = STID127_ETH_SEL_INTERNAL_NOTEXT_PHYCLK;
+ freq = DWMAC_50MHZ;
+ }
+ } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ val = STID127_ETH_SEL_INTERNAL_NOTEXT_TXCLK;
+ if (spd == SPEED_1000)
+ freq = DWMAC_125MHZ;
+ else if (spd == SPEED_100)
+ freq = DWMAC_25MHZ;
+ else if (spd == SPEED_10)
+ freq = DWMAC_2_5MHZ;
+ }
+
+ if (freq)
+ clk_set_rate(dwmac->clk, freq);
+
+ regmap_update_bits(dwmac->regmap, reg, STID127_RETIME_SRC_MASK, val);
+}
+
+static int sti_dwmac_set_mode(struct sti_dwmac *dwmac)
+{
+ struct regmap *regmap = dwmac->regmap;
+ int iface = dwmac->interface;
+ u32 reg = dwmac->ctrl_reg;
+ u32 val;
+
+ if (dwmac->gmac_en)
+ regmap_update_bits(regmap, reg, EN_MASK, EN);
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+
+ val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ regmap_update_bits(regmap, reg, ENMII_MASK, val);
+
+ dwmac->fix_retime_src(dwmac, dwmac->speed);
+
+ return 0;
+}
+
+static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ int err;
+
+ /* clk selection from extra syscfg register */
+ dwmac->clk_sel_reg = -ENXIO;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-clkconf");
+ if (res)
+ dwmac->clk_sel_reg = res->start;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->ctrl_reg);
+ if (err) {
+ dev_err(dev, "Can't get sysconfig ctrl offset (%d)\n", err);
+ return err;
+ }
+
+ err = of_get_phy_mode(np, &dwmac->interface);
+ if (err && err != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ return err;
+ }
+
+ dwmac->regmap = regmap;
+ dwmac->gmac_en = of_property_read_bool(np, "st,gmac_en");
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+ dwmac->tx_retime_src = TX_RETIME_SRC_NA;
+ dwmac->speed = SPEED_100;
+
+ if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
+ const char *rs;
+
+ dwmac->tx_retime_src = TX_RETIME_SRC_CLKGEN;
+
+ err = of_property_read_string(np, "st,tx-retime-src", &rs);
+ if (err < 0) {
+ dev_warn(dev, "Using internal clock source\n");
+ } else {
+ if (!strcasecmp(rs, "clk_125"))
+ dwmac->tx_retime_src = TX_RETIME_SRC_CLK_125;
+ else if (!strcasecmp(rs, "txclk"))
+ dwmac->tx_retime_src = TX_RETIME_SRC_TXCLK;
+ }
+ dwmac->speed = SPEED_1000;
+ }
+
+ dwmac->clk = devm_clk_get(dev, "sti-ethclk");
+ if (IS_ERR(dwmac->clk)) {
+ dev_warn(dev, "No phy clock provided...\n");
+ dwmac->clk = NULL;
+ }
+
+ return 0;
+}
+
+static int sti_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ const struct sti_dwmac_of_data *data;
+ struct stmmac_resources stmmac_res;
+ struct sti_dwmac *dwmac;
+ int ret;
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "No OF match data provided\n");
+ return -EINVAL;
+ }
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ ret = sti_dwmac_parse_data(dwmac, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ goto err_remove_config_dt;
+ }
+
+ dwmac->fix_retime_src = data->fix_retime_src;
+
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = data->fix_retime_src;
+
+ ret = clk_prepare_enable(dwmac->clk);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = sti_dwmac_set_mode(dwmac);
+ if (ret)
+ goto disable_clk;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto disable_clk;
+
+ return 0;
+
+disable_clk:
+ clk_disable_unprepare(dwmac->clk);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sti_dwmac_remove(struct platform_device *pdev)
+{
+ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int sti_dwmac_suspend(struct device *dev)
+{
+ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
+ int ret = stmmac_suspend(dev);
+
+ clk_disable_unprepare(dwmac->clk);
+
+ return ret;
+}
+
+static int sti_dwmac_resume(struct device *dev)
+{
+ struct sti_dwmac *dwmac = get_stmmac_bsp_priv(dev);
+
+ clk_prepare_enable(dwmac->clk);
+ sti_dwmac_set_mode(dwmac);
+
+ return stmmac_resume(dev);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(sti_dwmac_pm_ops, sti_dwmac_suspend,
+ sti_dwmac_resume);
+
+static const struct sti_dwmac_of_data stih4xx_dwmac_data = {
+ .fix_retime_src = stih4xx_fix_retime_src,
+};
+
+static const struct sti_dwmac_of_data stid127_dwmac_data = {
+ .fix_retime_src = stid127_fix_retime_src,
+};
+
+static const struct of_device_id sti_dwmac_match[] = {
+ { .compatible = "st,stih415-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &stih4xx_dwmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &stid127_dwmac_data},
+ { .compatible = "st,stih407-dwmac", .data = &stih4xx_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, sti_dwmac_match);
+
+static struct platform_driver sti_dwmac_driver = {
+ .probe = sti_dwmac_probe,
+ .remove = sti_dwmac_remove,
+ .driver = {
+ .name = "sti-dwmac",
+ .pm = &sti_dwmac_pm_ops,
+ .of_match_table = sti_dwmac_match,
+ },
+};
+module_platform_driver(sti_dwmac_driver);
+
+MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
new file mode 100644
index 000000000..533f5245a
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c
@@ -0,0 +1,545 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dwmac-stm32.c - DWMAC Specific Glue layer for STM32 MCU
+ *
+ * Copyright (C) STMicroelectronics SA 2017
+ * Author: Alexandre Torgue <alexandre.torgue@st.com> for STMicroelectronics.
+ */
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_wakeirq.h>
+#include <linux/regmap.h>
+#include <linux/slab.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+
+#define SYSCFG_MCU_ETH_MASK BIT(23)
+#define SYSCFG_MP1_ETH_MASK GENMASK(23, 16)
+#define SYSCFG_PMCCLRR_OFFSET 0x40
+
+#define SYSCFG_PMCR_ETH_CLK_SEL BIT(16)
+#define SYSCFG_PMCR_ETH_REF_CLK_SEL BIT(17)
+
+/* Clock fed to the PHY */
+#define ETH_CK_F_25M 25000000
+#define ETH_CK_F_50M 50000000
+#define ETH_CK_F_125M 125000000
+
+/* Ethernet PHY interface selection in register SYSCFG Configuration
+ *------------------------------------------
+ * src |BIT(23)| BIT(22)| BIT(21)|BIT(20)|
+ *------------------------------------------
+ * MII | 0 | 0 | 0 | 1 |
+ *------------------------------------------
+ * GMII | 0 | 0 | 0 | 0 |
+ *------------------------------------------
+ * RGMII | 0 | 0 | 1 | n/a |
+ *------------------------------------------
+ * RMII | 1 | 0 | 0 | n/a |
+ *------------------------------------------
+ */
+#define SYSCFG_PMCR_ETH_SEL_MII BIT(20)
+#define SYSCFG_PMCR_ETH_SEL_RGMII BIT(21)
+#define SYSCFG_PMCR_ETH_SEL_RMII BIT(23)
+#define SYSCFG_PMCR_ETH_SEL_GMII 0
+#define SYSCFG_MCU_ETH_SEL_MII 0
+#define SYSCFG_MCU_ETH_SEL_RMII 1
+
+/* STM32MP1 register definitions
+ *
+ * The table below summarizes the clock requirements and clock sources
+ * for the supported PHY interface modes.
+ * __________________________________________________________________________
+ *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125MHz from PHY|
+ *| | | 25MHz | 50MHz | |
+ * ---------------------------------------------------------------------------
+ *| MII | - | eth-ck | n/a | n/a |
+ *| | | st,ext-phyclk | | |
+ * ---------------------------------------------------------------------------
+ *| GMII | - | eth-ck | n/a | n/a |
+ *| | | st,ext-phyclk | | |
+ * ---------------------------------------------------------------------------
+ *| RGMII | - | eth-ck | n/a | eth-ck |
+ *| | | st,ext-phyclk | | st,eth-clk-sel or|
+ *| | | | | st,ext-phyclk |
+ * ---------------------------------------------------------------------------
+ *| RMII | - | eth-ck | eth-ck | n/a |
+ *| | | st,ext-phyclk | st,eth-ref-clk-sel | |
+ *| | | | or st,ext-phyclk | |
+ * ---------------------------------------------------------------------------
+ *
+ */
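+
+/* Reading the RMII row of the table above: with a PHY lacking a crystal
+ * and eth-ck running at 25 or 50 MHz, the glue must source the reference
+ * clock internally. A condensed sketch of that check (hypothetical,
+ * compiled out; the real decision also honours st,ext-phyclk):
+ */
+#if 0
+static bool example_need_internal_ref_clk(struct clk *eth_ck, bool ref_sel)
+{
+ unsigned long rate = clk_get_rate(eth_ck);
+
+ return (rate == ETH_CK_F_25M || rate == ETH_CK_F_50M) && ref_sel;
+}
+#endif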
+
+struct stm32_dwmac {
+ struct clk *clk_tx;
+ struct clk *clk_rx;
+ struct clk *clk_eth_ck;
+ struct clk *clk_ethstp;
+ struct clk *syscfg_clk;
+ int ext_phyclk;
+ int enable_eth_ck;
+ int eth_clk_sel_reg;
+ int eth_ref_clk_sel_reg;
+ int irq_pwr_wakeup;
+ u32 mode_reg; /* MAC glue-logic mode register */
+ struct regmap *regmap;
+ u32 speed;
+ const struct stm32_ops *ops;
+ struct device *dev;
+};
+
+struct stm32_ops {
+ int (*set_mode)(struct plat_stmmacenet_data *plat_dat);
+ int (*clk_prepare)(struct stm32_dwmac *dwmac, bool prepare);
+ int (*suspend)(struct stm32_dwmac *dwmac);
+ void (*resume)(struct stm32_dwmac *dwmac);
+ int (*parse_data)(struct stm32_dwmac *dwmac,
+ struct device *dev);
+ u32 syscfg_eth_mask;
+ bool clk_rx_enable_in_suspend;
+};
+
+static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ int ret;
+
+ if (dwmac->ops->set_mode) {
+ ret = dwmac->ops->set_mode(plat_dat);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwmac->clk_tx);
+ if (ret)
+ return ret;
+
+ if (!dwmac->ops->clk_rx_enable_in_suspend ||
+ !dwmac->dev->power.is_suspended) {
+ ret = clk_prepare_enable(dwmac->clk_rx);
+ if (ret) {
+ clk_disable_unprepare(dwmac->clk_tx);
+ return ret;
+ }
+ }
+
+ if (dwmac->ops->clk_prepare) {
+ ret = dwmac->ops->clk_prepare(dwmac, true);
+ if (ret) {
+ clk_disable_unprepare(dwmac->clk_rx);
+ clk_disable_unprepare(dwmac->clk_tx);
+ }
+ }
+
+ return ret;
+}
+
+static int stm32mp1_clk_prepare(struct stm32_dwmac *dwmac, bool prepare)
+{
+ int ret = 0;
+
+ if (prepare) {
+ ret = clk_prepare_enable(dwmac->syscfg_clk);
+ if (ret)
+ return ret;
+ if (dwmac->enable_eth_ck) {
+ ret = clk_prepare_enable(dwmac->clk_eth_ck);
+ if (ret) {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
+ }
+ return ret;
+}
+
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg, clk_rate;
+ int val;
+
+ clk_rate = clk_get_rate(dwmac->clk_eth_ck);
+ dwmac->enable_eth_ck = false;
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk)
+ dwmac->enable_eth_ck = true;
+ val = SYSCFG_PMCR_ETH_SEL_MII;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ break;
+ case PHY_INTERFACE_MODE_GMII:
+ val = SYSCFG_PMCR_ETH_SEL_GMII;
+ if (clk_rate == ETH_CK_F_25M &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
+ val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_PMCR_ETH_SEL_RMII;
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) &&
+ (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
+ val |= SYSCFG_PMCR_ETH_REF_CLK_SEL;
+ }
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ val = SYSCFG_PMCR_ETH_SEL_RGMII;
+ if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) &&
+ (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) {
+ dwmac->enable_eth_ck = true;
+ val |= SYSCFG_PMCR_ETH_CLK_SEL;
+ }
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n");
+ break;
+ default:
+ pr_debug("SYSCFG init : interface %d not managed\n",
+ plat_dat->interface);
+ /* Other interfaces are not managed */
+ return -EINVAL;
+ }
+
+ /* Need to update PMCCLRR (clear register) */
+ regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
+
+ /* Update PMCSETR (set register) */
+ return regmap_update_bits(dwmac->regmap, reg,
+ dwmac->ops->syscfg_eth_mask, val);
+}
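+
+/* The clear-then-set sequence above is the usual pattern for a SYSCFG
+ * field with a separate clear register; a condensed sketch (hypothetical,
+ * compiled out):
+ */
+#if 0
+static int example_pmcr_update(struct stm32_dwmac *dwmac, u32 val)
+{
+ /* wipe the whole ethernet field via the clear register ... */
+ regmap_write(dwmac->regmap, dwmac->mode_reg + SYSCFG_PMCCLRR_OFFSET,
+ dwmac->ops->syscfg_eth_mask);
+ /* ... then program the new selection via the set register */
+ return regmap_update_bits(dwmac->regmap, dwmac->mode_reg,
+ dwmac->ops->syscfg_eth_mask, val);
+}
+#endif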
+
+static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat)
+{
+ struct stm32_dwmac *dwmac = plat_dat->bsp_priv;
+ u32 reg = dwmac->mode_reg;
+ int val;
+
+ switch (plat_dat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ val = SYSCFG_MCU_ETH_SEL_MII;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n");
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ val = SYSCFG_MCU_ETH_SEL_RMII;
+ pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n");
+ break;
+ default:
+ pr_debug("SYSCFG init : interface %d not managed\n",
+ plat_dat->interface);
+ /* Other interfaces are not managed */
+ return -EINVAL;
+ }
+
+ return regmap_update_bits(dwmac->regmap, reg,
+ dwmac->ops->syscfg_eth_mask, val << 23);
+}
+
+static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ if (dwmac->ops->clk_prepare)
+ dwmac->ops->clk_prepare(dwmac, false);
+}
+
+static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac,
+ struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ int err;
+
+ /* Get TX/RX clocks */
+ dwmac->clk_tx = devm_clk_get(dev, "mac-clk-tx");
+ if (IS_ERR(dwmac->clk_tx)) {
+ dev_err(dev, "No ETH Tx clock provided...\n");
+ return PTR_ERR(dwmac->clk_tx);
+ }
+
+ dwmac->clk_rx = devm_clk_get(dev, "mac-clk-rx");
+ if (IS_ERR(dwmac->clk_rx)) {
+ dev_err(dev, "No ETH Rx clock provided...\n");
+ return PTR_ERR(dwmac->clk_rx);
+ }
+
+ if (dwmac->ops->parse_data) {
+ err = dwmac->ops->parse_data(dwmac, dev);
+ if (err)
+ return err;
+ }
+
+ /* Get mode register */
+ dwmac->regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(dwmac->regmap))
+ return PTR_ERR(dwmac->regmap);
+
+ err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg);
+ if (err)
+ dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err);
+
+ return err;
+}
+
+static int stm32mp1_parse_data(struct stm32_dwmac *dwmac,
+ struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct device_node *np = dev->of_node;
+ int err = 0;
+
+ /* Ethernet PHY has no crystal */
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+
+ /* Gigabit Ethernet 125MHz clock selection. */
+ dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel");
+
+ /* Ethernet 50MHz RMII clock selection */
+ dwmac->eth_ref_clk_sel_reg =
+ of_property_read_bool(np, "st,eth-ref-clk-sel");
+
+ /* Get ETH_CLK clocks */
+ dwmac->clk_eth_ck = devm_clk_get(dev, "eth-ck");
+ if (IS_ERR(dwmac->clk_eth_ck)) {
+ dev_info(dev, "No phy clock provided...\n");
+ dwmac->clk_eth_ck = NULL;
+ }
+
+ /* Clock used for low power mode */
+ dwmac->clk_ethstp = devm_clk_get(dev, "ethstp");
+ if (IS_ERR(dwmac->clk_ethstp)) {
+ dev_err(dev,
+ "No ETH peripheral clock provided for CStop mode ...\n");
+ return PTR_ERR(dwmac->clk_ethstp);
+ }
+
+ /* Optional Clock for sysconfig */
+ dwmac->syscfg_clk = devm_clk_get(dev, "syscfg-clk");
+ if (IS_ERR(dwmac->syscfg_clk))
+ dwmac->syscfg_clk = NULL;
+
+ /* Get IRQ information early so that we can ask for deferred probe
+ * if needed, before we go too far with resource allocation.
+ */
+ dwmac->irq_pwr_wakeup = platform_get_irq_byname_optional(pdev,
+ "stm32_pwr_wakeup");
+ if (dwmac->irq_pwr_wakeup == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (!dwmac->clk_eth_ck && dwmac->irq_pwr_wakeup >= 0) {
+ err = device_init_wakeup(&pdev->dev, true);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to init wake up irq\n");
+ return err;
+ }
+ err = dev_pm_set_dedicated_wake_irq(&pdev->dev,
+ dwmac->irq_pwr_wakeup);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to set wake up irq\n");
+ device_init_wakeup(&pdev->dev, false);
+ }
+ device_set_wakeup_enable(&pdev->dev, false);
+ }
+ return err;
+}
+
+static int stm32_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct stm32_dwmac *dwmac;
+ const struct stm32_ops *data;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ dev_err(&pdev->dev, "no of match data provided\n");
+ ret = -EINVAL;
+ goto err_remove_config_dt;
+ }
+
+ dwmac->ops = data;
+ dwmac->dev = &pdev->dev;
+
+ ret = stm32_dwmac_parse_data(dwmac, &pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ goto err_remove_config_dt;
+ }
+
+ plat_dat->bsp_priv = dwmac;
+
+ ret = stm32_dwmac_init(plat_dat);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_clk_disable;
+
+ return 0;
+
+err_clk_disable:
+ stm32_dwmac_clk_disable(dwmac);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int stm32_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = stmmac_dvr_remove(&pdev->dev);
+ struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
+
+ stm32_dwmac_clk_disable(priv->plat->bsp_priv);
+
+ if (dwmac->irq_pwr_wakeup >= 0) {
+ dev_pm_clear_wake_irq(&pdev->dev);
+ device_init_wakeup(&pdev->dev, false);
+ }
+
+ return ret;
+}
+
+static int stm32mp1_suspend(struct stm32_dwmac *dwmac)
+{
+ int ret = 0;
+
+ ret = clk_prepare_enable(dwmac->clk_ethstp);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->syscfg_clk);
+ if (dwmac->enable_eth_ck)
+ clk_disable_unprepare(dwmac->clk_eth_ck);
+
+ return ret;
+}
+
+static void stm32mp1_resume(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_ethstp);
+}
+
+static int stm32mcu_suspend(struct stm32_dwmac *dwmac)
+{
+ clk_disable_unprepare(dwmac->clk_tx);
+ clk_disable_unprepare(dwmac->clk_rx);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int stm32_dwmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
+ int ret;
+
+ ret = stmmac_suspend(dev);
+
+ if (dwmac->ops->suspend)
+ ret = dwmac->ops->suspend(dwmac);
+
+ return ret;
+}
+
+static int stm32_dwmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct stm32_dwmac *dwmac = priv->plat->bsp_priv;
+ int ret;
+
+ if (dwmac->ops->resume)
+ dwmac->ops->resume(dwmac);
+
+ ret = stm32_dwmac_init(priv->plat);
+ if (ret)
+ return ret;
+
+ ret = stmmac_resume(dev);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops,
+ stm32_dwmac_suspend, stm32_dwmac_resume);
+
+static struct stm32_ops stm32mcu_dwmac_data = {
+ .set_mode = stm32mcu_set_mode,
+ .suspend = stm32mcu_suspend,
+ .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK
+};
+
+static struct stm32_ops stm32mp1_dwmac_data = {
+ .set_mode = stm32mp1_set_mode,
+ .clk_prepare = stm32mp1_clk_prepare,
+ .suspend = stm32mp1_suspend,
+ .resume = stm32mp1_resume,
+ .parse_data = stm32mp1_parse_data,
+ .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK,
+ .clk_rx_enable_in_suspend = true
+};
+
+static const struct of_device_id stm32_dwmac_match[] = {
+ { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data},
+ { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data},
+ { }
+};
+MODULE_DEVICE_TABLE(of, stm32_dwmac_match);
+
+static struct platform_driver stm32_dwmac_driver = {
+ .probe = stm32_dwmac_probe,
+ .remove = stm32_dwmac_remove,
+ .driver = {
+ .name = "stm32-dwmac",
+ .pm = &stm32_dwmac_pm_ops,
+ .of_match_table = stm32_dwmac_match,
+ },
+};
+module_platform_driver(stm32_dwmac_driver);
+
+MODULE_AUTHOR("Alexandre Torgue <alexandre.torgue@gmail.com>");
+MODULE_AUTHOR("Christophe Roullier <christophe.roullier@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 DWMAC Specific Glue layer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
new file mode 100644
index 000000000..f83447259
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -0,0 +1,1344 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dwmac-sun8i.c - Allwinner sun8i DWMAC specific glue layer
+ *
+ * Copyright (C) 2017 Corentin Labbe <clabbe.montjoie@gmail.com>
+ */
+
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mdio-mux.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/stmmac.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+/* General notes on dwmac-sun8i:
+ * Locking: no locking is necessary in this file because all necessary locking
+ * is done in the "stmmac files"
+ */
+
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
+ * @default_syscon_value: The default value of the EMAC register in syscon.
+ * This value is used to properly disable the EMAC,
+ * and as a known-good starting value in case the
+ * boot process (U-Boot) has left the register set.
+ * @syscon_field: reg_field for the syscon's gmac register
+ * @soc_has_internal_phy: Does the MAC embed an internal PHY
+ * @support_mii: Does the MAC handle MII
+ * @support_rmii: Does the MAC handle RMII
+ * @support_rgmii: Does the MAC handle RGMII
+ *
+ * @rx_delay_max: Maximum raw value for RX delay chain
+ * @tx_delay_max: Maximum raw value for TX delay chain
+ * These two also indicate the bitmask for
+ * the RX and TX delay chain registers. A
+ * value of zero indicates this is not supported.
+ */
+struct emac_variant {
+ u32 default_syscon_value;
+ const struct reg_field *syscon_field;
+ bool soc_has_internal_phy;
+ bool support_mii;
+ bool support_rmii;
+ bool support_rgmii;
+ u8 rx_delay_max;
+ u8 tx_delay_max;
+};
+
+/* struct sunxi_priv_data - hold all sunxi private data
+ * @ephy_clk: reference to the optional EPHY clock for the internal PHY
+ * @regulator: reference to the optional regulator
+ * @rst_ephy: reference to the optional EPHY reset for the internal PHY
+ * @variant: reference to the current board variant
+ * @regmap: regmap for using the syscon
+ * @internal_phy_powered: Is the internal PHY powered
+ * @use_internal_phy: Is the internal PHY selected for use
+ * @mux_handle: Internal pointer used by mdio-mux lib
+ */
+struct sunxi_priv_data {
+ struct clk *ephy_clk;
+ struct regulator *regulator;
+ struct reset_control *rst_ephy;
+ const struct emac_variant *variant;
+ struct regmap_field *regmap_field;
+ bool internal_phy_powered;
+ bool use_internal_phy;
+ void *mux_handle;
+};
+
+/* EMAC clock register @ 0x30 in the "system control" address range */
+static const struct reg_field sun8i_syscon_reg_field = {
+ .reg = 0x30,
+ .lsb = 0,
+ .msb = 31,
+};
+
+/* EMAC clock register @ 0x164 in the CCU address range */
+static const struct reg_field sun8i_ccu_reg_field = {
+ .reg = 0x164,
+ .lsb = 0,
+ .msb = 31,
+};
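+
+/* Both variants expose the EMAC clock configuration as one 32-bit
+ * regmap_field; a minimal access sketch (hypothetical, compiled out):
+ */
+#if 0
+static u32 example_read_emac_clk(struct sunxi_priv_data *gmac)
+{
+ u32 reg = 0;
+
+ regmap_field_read(gmac->regmap_field, &reg);
+ return reg;
+}
+#endif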
+
+static const struct emac_variant emac_variant_h3 = {
+ .default_syscon_value = 0x58000,
+ .syscon_field = &sun8i_syscon_reg_field,
+ .soc_has_internal_phy = true,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
+static const struct emac_variant emac_variant_v3s = {
+ .default_syscon_value = 0x38000,
+ .syscon_field = &sun8i_syscon_reg_field,
+ .soc_has_internal_phy = true,
+ .support_mii = true
+};
+
+static const struct emac_variant emac_variant_a83t = {
+ .default_syscon_value = 0,
+ .syscon_field = &sun8i_syscon_reg_field,
+ .soc_has_internal_phy = false,
+ .support_mii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
+static const struct emac_variant emac_variant_r40 = {
+ .default_syscon_value = 0,
+ .syscon_field = &sun8i_ccu_reg_field,
+ .support_mii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 7,
+};
+
+static const struct emac_variant emac_variant_a64 = {
+ .default_syscon_value = 0,
+ .syscon_field = &sun8i_syscon_reg_field,
+ .soc_has_internal_phy = false,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
+static const struct emac_variant emac_variant_h6 = {
+ .default_syscon_value = 0x50000,
+ .syscon_field = &sun8i_syscon_reg_field,
+ /* The "Internal PHY" of H6 is not on the die. It's on the
+ * co-packaged AC200 chip instead.
+ */
+ .soc_has_internal_phy = false,
+ .support_mii = true,
+ .support_rmii = true,
+ .support_rgmii = true,
+ .rx_delay_max = 31,
+ .tx_delay_max = 7,
+};
+
+#define EMAC_BASIC_CTL0 0x00
+#define EMAC_BASIC_CTL1 0x04
+#define EMAC_INT_STA 0x08
+#define EMAC_INT_EN 0x0C
+#define EMAC_TX_CTL0 0x10
+#define EMAC_TX_CTL1 0x14
+#define EMAC_TX_FLOW_CTL 0x1C
+#define EMAC_TX_DESC_LIST 0x20
+#define EMAC_RX_CTL0 0x24
+#define EMAC_RX_CTL1 0x28
+#define EMAC_RX_DESC_LIST 0x34
+#define EMAC_RX_FRM_FLT 0x38
+#define EMAC_MDIO_CMD 0x48
+#define EMAC_MDIO_DATA 0x4C
+#define EMAC_MACADDR_HI(reg) (0x50 + (reg) * 8)
+#define EMAC_MACADDR_LO(reg) (0x54 + (reg) * 8)
+#define EMAC_TX_DMA_STA 0xB0
+#define EMAC_TX_CUR_DESC 0xB4
+#define EMAC_TX_CUR_BUF 0xB8
+#define EMAC_RX_DMA_STA 0xC0
+#define EMAC_RX_CUR_DESC 0xC4
+#define EMAC_RX_CUR_BUF 0xC8
+
+/* Used in EMAC_BASIC_CTL0 */
+#define EMAC_DUPLEX_FULL BIT(0)
+#define EMAC_LOOPBACK BIT(1)
+#define EMAC_SPEED_1000 0
+#define EMAC_SPEED_100 (0x03 << 2)
+#define EMAC_SPEED_10 (0x02 << 2)
+
+/* Used in EMAC_BASIC_CTL1 */
+#define EMAC_BURSTLEN_SHIFT 24
+
+/* Used in EMAC_RX_FRM_FLT */
+#define EMAC_FRM_FLT_RXALL BIT(0)
+#define EMAC_FRM_FLT_CTL BIT(13)
+#define EMAC_FRM_FLT_MULTICAST BIT(16)
+
+/* Used in RX_CTL1 */
+#define EMAC_RX_MD BIT(1)
+#define EMAC_RX_TH_MASK GENMASK(5, 4)
+#define EMAC_RX_TH_32 0
+#define EMAC_RX_TH_64 (0x1 << 4)
+#define EMAC_RX_TH_96 (0x2 << 4)
+#define EMAC_RX_TH_128 (0x3 << 4)
+#define EMAC_RX_DMA_EN BIT(30)
+#define EMAC_RX_DMA_START BIT(31)
+
+/* Used in TX_CTL1 */
+#define EMAC_TX_MD BIT(1)
+#define EMAC_TX_NEXT_FRM BIT(2)
+#define EMAC_TX_TH_MASK GENMASK(10, 8)
+#define EMAC_TX_TH_64 0
+#define EMAC_TX_TH_128 (0x1 << 8)
+#define EMAC_TX_TH_192 (0x2 << 8)
+#define EMAC_TX_TH_256 (0x3 << 8)
+#define EMAC_TX_DMA_EN BIT(30)
+#define EMAC_TX_DMA_START BIT(31)
+
+/* Used in RX_CTL0 */
+#define EMAC_RX_RECEIVER_EN BIT(31)
+#define EMAC_RX_DO_CRC BIT(27)
+#define EMAC_RX_FLOW_CTL_EN BIT(16)
+
+/* Used in TX_CTL0 */
+#define EMAC_TX_TRANSMITTER_EN BIT(31)
+
+/* Used in EMAC_TX_FLOW_CTL */
+#define EMAC_TX_FLOW_CTL_EN BIT(0)
+
+/* Used in EMAC_INT_STA */
+#define EMAC_TX_INT BIT(0)
+#define EMAC_TX_DMA_STOP_INT BIT(1)
+#define EMAC_TX_BUF_UA_INT BIT(2)
+#define EMAC_TX_TIMEOUT_INT BIT(3)
+#define EMAC_TX_UNDERFLOW_INT BIT(4)
+#define EMAC_TX_EARLY_INT BIT(5)
+#define EMAC_RX_INT BIT(8)
+#define EMAC_RX_BUF_UA_INT BIT(9)
+#define EMAC_RX_DMA_STOP_INT BIT(10)
+#define EMAC_RX_TIMEOUT_INT BIT(11)
+#define EMAC_RX_OVERFLOW_INT BIT(12)
+#define EMAC_RX_EARLY_INT BIT(13)
+#define EMAC_RGMII_STA_INT BIT(16)
+
+#define EMAC_INT_MSK_COMMON EMAC_RGMII_STA_INT
+#define EMAC_INT_MSK_TX (EMAC_TX_INT | \
+ EMAC_TX_DMA_STOP_INT | \
+ EMAC_TX_BUF_UA_INT | \
+ EMAC_TX_TIMEOUT_INT | \
+ EMAC_TX_UNDERFLOW_INT | \
+ EMAC_TX_EARLY_INT |\
+ EMAC_INT_MSK_COMMON)
+#define EMAC_INT_MSK_RX (EMAC_RX_INT | \
+ EMAC_RX_BUF_UA_INT | \
+ EMAC_RX_DMA_STOP_INT | \
+ EMAC_RX_TIMEOUT_INT | \
+ EMAC_RX_OVERFLOW_INT | \
+ EMAC_RX_EARLY_INT | \
+ EMAC_INT_MSK_COMMON)
+
+#define MAC_ADDR_TYPE_DST BIT(31)
+
+/* H3 specific bits for EPHY */
+#define H3_EPHY_ADDR_SHIFT 20
+#define H3_EPHY_CLK_SEL BIT(18) /* 1: 24MHz, 0: 25MHz */
+#define H3_EPHY_LED_POL BIT(17) /* 1: active low, 0: active high */
+#define H3_EPHY_SHUTDOWN BIT(16) /* 1: shutdown, 0: power up */
+#define H3_EPHY_SELECT BIT(15) /* 1: internal PHY, 0: external PHY */
+#define H3_EPHY_MUX_MASK (H3_EPHY_SHUTDOWN | H3_EPHY_SELECT)
+#define DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID 1
+#define DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID 2
+
+/* H3/A64 specific bits */
+#define SYSCON_RMII_EN BIT(13) /* 1: enable RMII (overrides EPIT) */
+
+/* Generic system control EMAC_CLK bits */
+#define SYSCON_ETXDC_SHIFT 10
+#define SYSCON_ERXDC_SHIFT 5
+/* EMAC PHY Interface Type */
+#define SYSCON_EPIT BIT(2) /* 1: RGMII, 0: MII */
+#define SYSCON_ETCS_MASK GENMASK(1, 0)
+#define SYSCON_ETCS_MII 0x0
+#define SYSCON_ETCS_EXT_GMII 0x1
+#define SYSCON_ETCS_INT_GMII 0x2
+
+/* sun8i_dwmac_dma_reset() - reset the EMAC
+ * Called from stmmac via stmmac_dma_ops->reset
+ */
+static int sun8i_dwmac_dma_reset(void __iomem *ioaddr)
+{
+ writel(0, ioaddr + EMAC_RX_CTL1);
+ writel(0, ioaddr + EMAC_TX_CTL1);
+ writel(0, ioaddr + EMAC_RX_FRM_FLT);
+ writel(0, ioaddr + EMAC_RX_DESC_LIST);
+ writel(0, ioaddr + EMAC_TX_DESC_LIST);
+ writel(0, ioaddr + EMAC_INT_EN);
+ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
+ return 0;
+}
+
+/* sun8i_dwmac_dma_init() - initialize the EMAC
+ * Called from stmmac via stmmac_dma_ops->init
+ */
+static void sun8i_dwmac_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ writel(EMAC_RX_INT | EMAC_TX_INT, ioaddr + EMAC_INT_EN);
+ writel(0x1FFFFFF, ioaddr + EMAC_INT_STA);
+}
+
+static void sun8i_dwmac_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_rx_phy, u32 chan)
+{
+ /* Write RX descriptors address */
+ writel(lower_32_bits(dma_rx_phy), ioaddr + EMAC_RX_DESC_LIST);
+}
+
+static void sun8i_dwmac_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_tx_phy, u32 chan)
+{
+ /* Write TX descriptors address */
+ writel(lower_32_bits(dma_tx_phy), ioaddr + EMAC_TX_DESC_LIST);
+}
+
+/* sun8i_dwmac_dump_regs() - Dump EMAC address space
+ * Called from stmmac_dma_ops->dump_regs
+ * Used for ethtool
+ */
+static void sun8i_dwmac_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = 0; i < 0xC8; i += 4) {
+ if (i == 0x32 || i == 0x3C)
+ continue;
+ reg_space[i / 4] = readl(ioaddr + i);
+ }
+}
+
+/* sun8i_dwmac_dump_mac_regs() - Dump EMAC address space
+ * Called from stmmac_ops->dump_regs
+ * Used for ethtool
+ */
+static void sun8i_dwmac_dump_mac_regs(struct mac_device_info *hw,
+ u32 *reg_space)
+{
+ int i;
+ void __iomem *ioaddr = hw->pcsr;
+
+ for (i = 0; i < 0xC8; i += 4) {
+ if (i == 0x32 || i == 0x3C)
+ continue;
+ reg_space[i / 4] = readl(ioaddr + i);
+ }
+}
+
+static void sun8i_dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value |= EMAC_RX_INT;
+ if (tx)
+ value |= EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
+}
+
+static void sun8i_dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + EMAC_INT_EN);
+
+ if (rx)
+ value &= ~EMAC_RX_INT;
+ if (tx)
+ value &= ~EMAC_TX_INT;
+
+ writel(value, ioaddr + EMAC_INT_EN);
+}
+
+static void sun8i_dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v |= EMAC_TX_DMA_START;
+ v |= EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v |= EMAC_TX_DMA_START;
+ v |= EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ v &= ~EMAC_TX_DMA_EN;
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static void sun8i_dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ v |= EMAC_RX_DMA_START;
+ v |= EMAC_RX_DMA_EN;
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ v &= ~EMAC_RX_DMA_EN;
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static int sun8i_dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan,
+ u32 dir)
+{
+ u32 v;
+ int ret = 0;
+
+ v = readl(ioaddr + EMAC_INT_STA);
+
+ if (dir == DMA_DIR_RX)
+ v &= EMAC_INT_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ v &= EMAC_INT_MSK_TX;
+
+ if (v & EMAC_TX_INT) {
+ ret |= handle_tx;
+ x->tx_normal_irq_n++;
+ }
+
+ if (v & EMAC_TX_DMA_STOP_INT)
+ x->tx_process_stopped_irq++;
+
+ if (v & EMAC_TX_BUF_UA_INT)
+ x->tx_process_stopped_irq++;
+
+ if (v & EMAC_TX_TIMEOUT_INT)
+ ret |= tx_hard_error;
+
+ if (v & EMAC_TX_UNDERFLOW_INT) {
+ ret |= tx_hard_error;
+ x->tx_undeflow_irq++;
+ }
+
+ if (v & EMAC_TX_EARLY_INT)
+ x->tx_early_irq++;
+
+ if (v & EMAC_RX_INT) {
+ ret |= handle_rx;
+ x->rx_normal_irq_n++;
+ }
+
+ if (v & EMAC_RX_BUF_UA_INT)
+ x->rx_buf_unav_irq++;
+
+ if (v & EMAC_RX_DMA_STOP_INT)
+ x->rx_process_stopped_irq++;
+
+ if (v & EMAC_RX_TIMEOUT_INT)
+ ret |= tx_hard_error;
+
+ if (v & EMAC_RX_OVERFLOW_INT) {
+ ret |= tx_hard_error;
+ x->rx_overflow_irq++;
+ }
+
+ if (v & EMAC_RX_EARLY_INT)
+ x->rx_early_irq++;
+
+ if (v & EMAC_RGMII_STA_INT)
+ x->irq_rgmii_n++;
+
+ writel(v, ioaddr + EMAC_INT_STA);
+
+ return ret;
+}
+
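+/* Illustrative only (an assumption about the caller, not code from this
+ * patch): the stmmac core consumes the status mask returned above roughly
+ * like this, scheduling NAPI for the paths that need servicing:
+ *
+ *	status = stmmac_dma_interrupt(priv, ioaddr, &priv->xstats, chan, dir);
+ *	if (status & handle_rx)
+ *		napi_schedule(&ch->rx_napi);
+ *	if (status & handle_tx)
+ *		napi_schedule(&ch->tx_napi);
+ */
+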
+static void sun8i_dwmac_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL1);
+ if (mode == SF_DMA_MODE) {
+ v |= EMAC_RX_MD;
+ } else {
+ v &= ~EMAC_RX_MD;
+ v &= ~EMAC_RX_TH_MASK;
+ if (mode < 32)
+ v |= EMAC_RX_TH_32;
+ else if (mode < 64)
+ v |= EMAC_RX_TH_64;
+ else if (mode < 96)
+ v |= EMAC_RX_TH_96;
+ else if (mode < 128)
+ v |= EMAC_RX_TH_128;
+ }
+ writel(v, ioaddr + EMAC_RX_CTL1);
+}
+
+static void sun8i_dwmac_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 v;
+
+ v = readl(ioaddr + EMAC_TX_CTL1);
+ if (mode == SF_DMA_MODE) {
+ v |= EMAC_TX_MD;
+ /* Undocumented bit (called TX_NEXT_FRM in BSP), the original
+ * comment is
+ * "Operating on second frame increase the performance
+ * especially when transmit store-and-forward is used."
+ */
+ v |= EMAC_TX_NEXT_FRM;
+ } else {
+ v &= ~EMAC_TX_MD;
+ v &= ~EMAC_TX_TH_MASK;
+ if (mode < 64)
+ v |= EMAC_TX_TH_64;
+ else if (mode < 128)
+ v |= EMAC_TX_TH_128;
+ else if (mode < 192)
+ v |= EMAC_TX_TH_192;
+ else if (mode < 256)
+ v |= EMAC_TX_TH_256;
+ }
+ writel(v, ioaddr + EMAC_TX_CTL1);
+}
+
+static const struct stmmac_dma_ops sun8i_dwmac_dma_ops = {
+ .reset = sun8i_dwmac_dma_reset,
+ .init = sun8i_dwmac_dma_init,
+ .init_rx_chan = sun8i_dwmac_dma_init_rx,
+ .init_tx_chan = sun8i_dwmac_dma_init_tx,
+ .dump_regs = sun8i_dwmac_dump_regs,
+ .dma_rx_mode = sun8i_dwmac_dma_operation_mode_rx,
+ .dma_tx_mode = sun8i_dwmac_dma_operation_mode_tx,
+ .enable_dma_transmission = sun8i_dwmac_enable_dma_transmission,
+ .enable_dma_irq = sun8i_dwmac_enable_dma_irq,
+ .disable_dma_irq = sun8i_dwmac_disable_dma_irq,
+ .start_tx = sun8i_dwmac_dma_start_tx,
+ .stop_tx = sun8i_dwmac_dma_stop_tx,
+ .start_rx = sun8i_dwmac_dma_start_rx,
+ .stop_rx = sun8i_dwmac_dma_stop_rx,
+ .dma_interrupt = sun8i_dwmac_dma_interrupt,
+};
+
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv);
+
+static int sun8i_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct sunxi_priv_data *gmac = priv;
+ int ret;
+
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret) {
+ dev_err(&pdev->dev, "Fail to enable regulator\n");
+ return ret;
+ }
+ }
+
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(netdev_priv(ndev));
+ if (ret)
+ goto err_disable_regulator;
+ }
+
+ return 0;
+
+err_disable_regulator:
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+
+ return ret;
+}
+
+static void sun8i_dwmac_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = (8 << EMAC_BURSTLEN_SHIFT); /* burst len */
+ writel(v, ioaddr + EMAC_BASIC_CTL1);
+}
+
+static void sun8i_dwmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 t, r;
+
+ t = readl(ioaddr + EMAC_TX_CTL0);
+ r = readl(ioaddr + EMAC_RX_CTL0);
+ if (enable) {
+ t |= EMAC_TX_TRANSMITTER_EN;
+ r |= EMAC_RX_RECEIVER_EN;
+ } else {
+ t &= ~EMAC_TX_TRANSMITTER_EN;
+ r &= ~EMAC_RX_RECEIVER_EN;
+ }
+ writel(t, ioaddr + EMAC_TX_CTL0);
+ writel(r, ioaddr + EMAC_RX_CTL0);
+}
+
+/* Set MAC address at slot reg_n
+ * All slots > 0 need to be enabled with MAC_ADDR_TYPE_DST
+ * If addr is NULL, clear the slot
+ */
+static void sun8i_dwmac_set_umac_addr(struct mac_device_info *hw,
+ const unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ if (!addr) {
+ writel(0, ioaddr + EMAC_MACADDR_HI(reg_n));
+ return;
+ }
+
+ stmmac_set_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
+ EMAC_MACADDR_LO(reg_n));
+ if (reg_n > 0) {
+ v = readl(ioaddr + EMAC_MACADDR_HI(reg_n));
+ v |= MAC_ADDR_TYPE_DST;
+ writel(v, ioaddr + EMAC_MACADDR_HI(reg_n));
+ }
+}
+
+static void sun8i_dwmac_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_get_mac_addr(ioaddr, addr, EMAC_MACADDR_HI(reg_n),
+ EMAC_MACADDR_LO(reg_n));
+}
+
+/* Caution: this function must return a non-zero value to work */
+static int sun8i_dwmac_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL0);
+ v |= EMAC_RX_DO_CRC;
+ writel(v, ioaddr + EMAC_RX_CTL0);
+
+ return 1;
+}
+
+static void sun8i_dwmac_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+ int i = 1;
+ struct netdev_hw_addr *ha;
+ int macaddrs = netdev_uc_count(dev) + netdev_mc_count(dev) + 1;
+
+ v = EMAC_FRM_FLT_CTL;
+
+ if (dev->flags & IFF_PROMISC) {
+ v = EMAC_FRM_FLT_RXALL;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ v |= EMAC_FRM_FLT_MULTICAST;
+ } else if (macaddrs <= hw->unicast_filter_entries) {
+ if (!netdev_mc_empty(dev)) {
+ netdev_for_each_mc_addr(ha, dev) {
+ sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
+ i++;
+ }
+ }
+ if (!netdev_uc_empty(dev)) {
+ netdev_for_each_uc_addr(ha, dev) {
+ sun8i_dwmac_set_umac_addr(hw, ha->addr, i);
+ i++;
+ }
+ }
+ } else {
+ if (!(readl(ioaddr + EMAC_RX_FRM_FLT) & EMAC_FRM_FLT_RXALL))
+ netdev_info(dev, "Too many address, switching to promiscuous\n");
+ v = EMAC_FRM_FLT_RXALL;
+ }
+
+ /* Disable unused address filter slots */
+ while (i < hw->unicast_filter_entries)
+ sun8i_dwmac_set_umac_addr(hw, NULL, i++);
+
+ writel(v, ioaddr + EMAC_RX_FRM_FLT);
+}
+
+static void sun8i_dwmac_flow_ctrl(struct mac_device_info *hw,
+ unsigned int duplex, unsigned int fc,
+ unsigned int pause_time, u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 v;
+
+ v = readl(ioaddr + EMAC_RX_CTL0);
+ if (fc == FLOW_AUTO)
+ v |= EMAC_RX_FLOW_CTL_EN;
+ else
+ v &= ~EMAC_RX_FLOW_CTL_EN;
+ writel(v, ioaddr + EMAC_RX_CTL0);
+
+ v = readl(ioaddr + EMAC_TX_FLOW_CTL);
+ if (fc == FLOW_AUTO)
+ v |= EMAC_TX_FLOW_CTL_EN;
+ else
+ v &= ~EMAC_TX_FLOW_CTL_EN;
+ writel(v, ioaddr + EMAC_TX_FLOW_CTL);
+}
+
+static int sun8i_dwmac_reset(struct stmmac_priv *priv)
+{
+ u32 v;
+ int err;
+
+ v = readl(priv->ioaddr + EMAC_BASIC_CTL1);
+ writel(v | 0x01, priv->ioaddr + EMAC_BASIC_CTL1);
+
+ /* The timeout was previously set to 10ms, but some boards
+ * (OrangePI0) need more if no cable is plugged in. 100ms seems OK.
+ */
+ err = readl_poll_timeout(priv->ioaddr + EMAC_BASIC_CTL1, v,
+ !(v & 0x01), 100, 100000);
+
+ if (err) {
+ dev_err(priv->device, "EMAC reset timeout\n");
+ return err;
+ }
+ return 0;
+}
+
+/* Search in mdio-mux node for internal PHY node and get its clk/reset */
+static int get_ephy_nodes(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ struct device_node *mdio_mux, *iphynode;
+ struct device_node *mdio_internal;
+ int ret;
+
+ mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
+ if (!mdio_mux) {
+ dev_err(priv->device, "Cannot get mdio-mux node\n");
+ return -ENODEV;
+ }
+
+ mdio_internal = of_get_compatible_child(mdio_mux,
+ "allwinner,sun8i-h3-mdio-internal");
+ of_node_put(mdio_mux);
+ if (!mdio_internal) {
+ dev_err(priv->device, "Cannot get internal_mdio node\n");
+ return -ENODEV;
+ }
+
+ /* Seek for internal PHY */
+ for_each_child_of_node(mdio_internal, iphynode) {
+ gmac->ephy_clk = of_clk_get(iphynode, 0);
+ if (IS_ERR(gmac->ephy_clk))
+ continue;
+ gmac->rst_ephy = of_reset_control_get_exclusive(iphynode, NULL);
+ if (IS_ERR(gmac->rst_ephy)) {
+ ret = PTR_ERR(gmac->rst_ephy);
+ if (ret == -EPROBE_DEFER) {
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
+ return ret;
+ }
+ continue;
+ }
+ dev_info(priv->device, "Found internal PHY node\n");
+ of_node_put(iphynode);
+ of_node_put(mdio_internal);
+ return 0;
+ }
+
+ of_node_put(mdio_internal);
+ return -ENODEV;
+}
+
+static int sun8i_dwmac_power_internal_phy(struct stmmac_priv *priv)
+{
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ int ret;
+
+ if (gmac->internal_phy_powered) {
+ dev_warn(priv->device, "Internal PHY already powered\n");
+ return 0;
+ }
+
+ dev_info(priv->device, "Powering internal PHY\n");
+ ret = clk_prepare_enable(gmac->ephy_clk);
+ if (ret) {
+ dev_err(priv->device, "Cannot enable internal PHY\n");
+ return ret;
+ }
+
+ /* Make sure the EPHY is properly reset, as U-Boot may leave
+ * it in a deasserted state, in which case resetting the EMAC
+ * may fail.
+ *
+ * This assumes the driver has exclusive access to the EPHY reset.
+ */
+ ret = reset_control_reset(gmac->rst_ephy);
+ if (ret) {
+ dev_err(priv->device, "Cannot reset internal PHY\n");
+ clk_disable_unprepare(gmac->ephy_clk);
+ return ret;
+ }
+
+ gmac->internal_phy_powered = true;
+
+ return 0;
+}
+
+static void sun8i_dwmac_unpower_internal_phy(struct sunxi_priv_data *gmac)
+{
+ if (!gmac->internal_phy_powered)
+ return;
+
+ clk_disable_unprepare(gmac->ephy_clk);
+ reset_control_assert(gmac->rst_ephy);
+ gmac->internal_phy_powered = false;
+}
+
+/* MDIO multiplexing switch function
+ * This function is called by the mdio-mux layer when it thinks the mdio bus
+ * multiplexer needs to switch.
+ * 'current_child' is the current value of the mux register
+ * 'desired_child' is the value of the 'reg' property of the target child MDIO
+ * node.
+ * The first time this function is called, current_child == -1.
+ * If current_child == desired_child, then the mux is already set to the
+ * correct bus.
+ */
+static int mdio_mux_syscon_switch_fn(int current_child, int desired_child,
+ void *data)
+{
+ struct stmmac_priv *priv = data;
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+ u32 reg, val;
+ int ret = 0;
+
+ if (current_child ^ desired_child) {
+ regmap_field_read(gmac->regmap_field, &reg);
+ switch (desired_child) {
+ case DWMAC_SUN8I_MDIO_MUX_INTERNAL_ID:
+ dev_info(priv->device, "Switch mux to internal PHY");
+ val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SELECT;
+ gmac->use_internal_phy = true;
+ break;
+ case DWMAC_SUN8I_MDIO_MUX_EXTERNAL_ID:
+ dev_info(priv->device, "Switch mux to external PHY");
+ val = (reg & ~H3_EPHY_MUX_MASK) | H3_EPHY_SHUTDOWN;
+ gmac->use_internal_phy = false;
+ break;
+ default:
+ dev_err(priv->device, "Invalid child ID %x\n",
+ desired_child);
+ return -EINVAL;
+ }
+ regmap_field_write(gmac->regmap_field, val);
+ if (gmac->use_internal_phy) {
+ ret = sun8i_dwmac_power_internal_phy(priv);
+ if (ret)
+ return ret;
+ } else {
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ }
+ /* After changing the syscon value, the MAC needs a reset,
+ * otherwise it keeps using the previous value (and thus the
+ * previously selected PHY).
+ */
+ ret = sun8i_dwmac_reset(priv);
+ }
+ return ret;
+}
+
+static int sun8i_dwmac_register_mdio_mux(struct stmmac_priv *priv)
+{
+ int ret;
+ struct device_node *mdio_mux;
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ mdio_mux = of_get_child_by_name(priv->device->of_node, "mdio-mux");
+ if (!mdio_mux)
+ return -ENODEV;
+
+ ret = mdio_mux_init(priv->device, mdio_mux, mdio_mux_syscon_switch_fn,
+ &gmac->mux_handle, priv, priv->mii);
+ of_node_put(mdio_mux);
+ return ret;
+}
+
+static int sun8i_dwmac_set_syscon(struct device *dev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct sunxi_priv_data *gmac = plat->bsp_priv;
+ struct device_node *node = dev->of_node;
+ int ret;
+ u32 reg, val;
+
+ ret = regmap_field_read(gmac->regmap_field, &val);
+ if (ret) {
+ dev_err(dev, "Fail to read from regmap field.\n");
+ return ret;
+ }
+
+ reg = gmac->variant->default_syscon_value;
+ if (reg != val)
+ dev_warn(dev,
+ "Current syscon value %x is not the default (expected %x)\n",
+ val, reg);
+
+ if (gmac->variant->soc_has_internal_phy) {
+ if (of_property_read_bool(node, "allwinner,leds-active-low"))
+ reg |= H3_EPHY_LED_POL;
+ else
+ reg &= ~H3_EPHY_LED_POL;
+
+ /* Force EPHY xtal frequency to 24MHz. */
+ reg |= H3_EPHY_CLK_SEL;
+
+ ret = of_mdio_parse_addr(dev, plat->phy_node);
+ if (ret < 0) {
+ dev_err(dev, "Could not parse MDIO addr\n");
+ return ret;
+ }
+ /* of_mdio_parse_addr returns a valid (0 ~ 31) PHY
+ * address. No need to mask it again.
+ */
+ reg |= 1 << H3_EPHY_ADDR_SHIFT;
+ } else {
+ /* For SoCs without internal PHY the PHY selection bit should be
+ * set to 0 (external PHY).
+ */
+ reg &= ~H3_EPHY_SELECT;
+ }
+
+ if (!of_property_read_u32(node, "allwinner,tx-delay-ps", &val)) {
+ if (val % 100) {
+ dev_err(dev, "tx-delay must be a multiple of 100\n");
+ return -EINVAL;
+ }
+ val /= 100;
+ dev_dbg(dev, "set tx-delay to %x\n", val);
+ if (val <= gmac->variant->tx_delay_max) {
+ reg &= ~(gmac->variant->tx_delay_max <<
+ SYSCON_ETXDC_SHIFT);
+ reg |= (val << SYSCON_ETXDC_SHIFT);
+ } else {
+ dev_err(dev, "Invalid TX clock delay: %d\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
+ if (!of_property_read_u32(node, "allwinner,rx-delay-ps", &val)) {
+ if (val % 100) {
+ dev_err(dev, "rx-delay must be a multiple of 100\n");
+ return -EINVAL;
+ }
+ val /= 100;
+ dev_dbg(dev, "set rx-delay to %x\n", val);
+ if (val <= gmac->variant->rx_delay_max) {
+ reg &= ~(gmac->variant->rx_delay_max <<
+ SYSCON_ERXDC_SHIFT);
+ reg |= (val << SYSCON_ERXDC_SHIFT);
+ } else {
+ dev_err(dev, "Invalid RX clock delay: %d\n",
+ val);
+ return -EINVAL;
+ }
+ }
+
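+	/* Example: "allwinner,tx-delay-ps = <700>" gives val = 7 above, which
+	 * is shifted into the ETXDC field; the rx-delay property feeds the
+	 * ERXDC field the same way.
+	 */
+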
+ /* Clear interface mode bits */
+ reg &= ~(SYSCON_ETCS_MASK | SYSCON_EPIT);
+ if (gmac->variant->support_rmii)
+ reg &= ~SYSCON_RMII_EN;
+
+ switch (plat->interface) {
+ case PHY_INTERFACE_MODE_MII:
+ /* default */
+ break;
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ reg |= SYSCON_EPIT | SYSCON_ETCS_INT_GMII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ reg |= SYSCON_RMII_EN | SYSCON_ETCS_EXT_GMII;
+ break;
+ default:
+ dev_err(dev, "Unsupported interface mode: %s",
+ phy_modes(plat->interface));
+ return -EINVAL;
+ }
+
+ regmap_field_write(gmac->regmap_field, reg);
+
+ return 0;
+}
+
+static void sun8i_dwmac_unset_syscon(struct sunxi_priv_data *gmac)
+{
+ u32 reg = gmac->variant->default_syscon_value;
+
+ regmap_field_write(gmac->regmap_field, reg);
+}
+
+static void sun8i_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ if (gmac->variant->soc_has_internal_phy)
+ sun8i_dwmac_unpower_internal_phy(gmac);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static void sun8i_dwmac_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + EMAC_BASIC_CTL0);
+
+ if (enable)
+ value |= EMAC_LOOPBACK;
+ else
+ value &= ~EMAC_LOOPBACK;
+
+ writel(value, ioaddr + EMAC_BASIC_CTL0);
+}
+
+static const struct stmmac_ops sun8i_dwmac_ops = {
+ .core_init = sun8i_dwmac_core_init,
+ .set_mac = sun8i_dwmac_set_mac,
+ .dump_regs = sun8i_dwmac_dump_mac_regs,
+ .rx_ipc = sun8i_dwmac_rx_ipc_enable,
+ .set_filter = sun8i_dwmac_set_filter,
+ .flow_ctrl = sun8i_dwmac_flow_ctrl,
+ .set_umac_addr = sun8i_dwmac_set_umac_addr,
+ .get_umac_addr = sun8i_dwmac_get_umac_addr,
+ .set_mac_loopback = sun8i_dwmac_set_mac_loopback,
+};
+
+static struct mac_device_info *sun8i_dwmac_setup(void *ppriv)
+{
+ struct mac_device_info *mac;
+ struct stmmac_priv *priv = ppriv;
+
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ if (!mac)
+ return NULL;
+
+ mac->pcsr = priv->ioaddr;
+ mac->mac = &sun8i_dwmac_ops;
+ mac->dma = &sun8i_dwmac_dma_ops;
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+
+ /* The loopback bit seems to be re-set on link changes, so
+ * simply mask it out each time.
+ * Speeds 10/100/1000 are set in BIT(2)/BIT(3).
+ */
+ mac->link.speed_mask = GENMASK(3, 2) | EMAC_LOOPBACK;
+ mac->link.speed10 = EMAC_SPEED_10;
+ mac->link.speed100 = EMAC_SPEED_100;
+ mac->link.speed1000 = EMAC_SPEED_1000;
+ mac->link.duplex = EMAC_DUPLEX_FULL;
+ mac->mii.addr = EMAC_MDIO_CMD;
+ mac->mii.data = EMAC_MDIO_DATA;
+ mac->mii.reg_shift = 4;
+ mac->mii.reg_mask = GENMASK(8, 4);
+ mac->mii.addr_shift = 12;
+ mac->mii.addr_mask = GENMASK(16, 12);
+ mac->mii.clk_csr_shift = 20;
+ mac->mii.clk_csr_mask = GENMASK(22, 20);
+ mac->unicast_filter_entries = 8;
+
+ /* Synopsys Id is not available */
+ priv->synopsys_id = 0;
+
+ return mac;
+}
+
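+/* A minimal sketch (not part of the original patch): how the mii.* layout
+ * set up above packs into an EMAC_MDIO_CMD word. The field positions come
+ * straight from the shift/mask values in sun8i_dwmac_setup(); any busy or
+ * read/write control bits the hardware needs are deliberately left out.
+ */
+static inline u32 sun8i_example_mdio_cmd(u32 phyaddr, u32 phyreg, u32 csr)
+{
+	u32 cmd = 0;
+
+	cmd |= (phyreg << 4) & GENMASK(8, 4);		/* mii.reg_* */
+	cmd |= (phyaddr << 12) & GENMASK(16, 12);	/* mii.addr_* */
+	cmd |= (csr << 20) & GENMASK(22, 20);		/* mii.clk_csr_* */
+
+	return cmd;
+}
+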
+static struct regmap *sun8i_dwmac_get_syscon_from_dev(struct device_node *node)
+{
+ struct device_node *syscon_node;
+ struct platform_device *syscon_pdev;
+ struct regmap *regmap = NULL;
+
+ syscon_node = of_parse_phandle(node, "syscon", 0);
+ if (!syscon_node)
+ return ERR_PTR(-ENODEV);
+
+ syscon_pdev = of_find_device_by_node(syscon_node);
+ if (!syscon_pdev) {
+ /* platform device might not be probed yet */
+ regmap = ERR_PTR(-EPROBE_DEFER);
+ goto out_put_node;
+ }
+
+ /* If no regmap is found then the other device driver is at fault */
+ regmap = dev_get_regmap(&syscon_pdev->dev, NULL);
+ if (!regmap)
+ regmap = ERR_PTR(-EINVAL);
+
+ platform_device_put(syscon_pdev);
+out_put_node:
+ of_node_put(syscon_node);
+ return regmap;
+}
+
+static int sun8i_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ phy_interface_t interface;
+ int ret;
+ struct stmmac_priv *priv;
+ struct net_device *ndev;
+ struct regmap *regmap;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return -ENOMEM;
+
+ gmac->variant = of_device_get_match_data(&pdev->dev);
+ if (!gmac->variant) {
+ dev_err(&pdev->dev, "Missing dwmac-sun8i variant\n");
+ return -EINVAL;
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(dev, "No regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ /* The "GMAC clock control" register might be located in the
+ * CCU address range (on the R40), or the system control address
+ * range (on most other sun8i and later SoCs).
+ *
+ * The former controls most if not all clocks in the SoC. The
+ * latter has an SoC identification register, and on some SoCs,
+ * controls to map device specific SRAM to either the intended
+ * peripheral, or the CPU address space.
+ *
+ * In either case, there should be a coordinated and restricted
+ * method of accessing the register needed here. This is done by
+ * having the device export a custom regmap, instead of a generic
+ * syscon, which grants all access to all registers.
+ *
+ * To support old device trees, we fall back to using the syscon
+ * interface if possible.
+ */
+ regmap = sun8i_dwmac_get_syscon_from_dev(pdev->dev.of_node);
+ if (IS_ERR(regmap))
+ regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
+ "syscon");
+ if (IS_ERR(regmap)) {
+ ret = PTR_ERR(regmap);
+ dev_err(&pdev->dev, "Unable to map syscon: %d\n", ret);
+ return ret;
+ }
+
+ gmac->regmap_field = devm_regmap_field_alloc(dev, regmap,
+ *gmac->variant->syscon_field);
+ if (IS_ERR(gmac->regmap_field)) {
+ ret = PTR_ERR(gmac->regmap_field);
+ dev_err(dev, "Unable to map syscon register: %d\n", ret);
+ return ret;
+ }
+
+ ret = of_get_phy_mode(dev->of_node, &interface);
+ if (ret)
+ return -EINVAL;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ /* platform data specifying hardware features and callbacks.
+ * hardware features were copied from Allwinner drivers.
+ */
+ plat_dat->interface = interface;
+ plat_dat->rx_coe = STMMAC_RX_COE_TYPE2;
+ plat_dat->tx_coe = 1;
+ plat_dat->has_sun8i = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun8i_dwmac_init;
+ plat_dat->exit = sun8i_dwmac_exit;
+ plat_dat->setup = sun8i_dwmac_setup;
+ plat_dat->tx_fifo_size = 4096;
+ plat_dat->rx_fifo_size = 16384;
+
+ ret = sun8i_dwmac_set_syscon(&pdev->dev, plat_dat);
+ if (ret)
+ goto dwmac_deconfig;
+
+ ret = sun8i_dwmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ goto dwmac_syscon;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto dwmac_exit;
+
+ ndev = dev_get_drvdata(&pdev->dev);
+ priv = netdev_priv(ndev);
+
+ /* The MAC is runtime suspended after stmmac_dvr_probe(), so we
+ * need to ensure it resumes before other operations such as
+ * reset.
+ */
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* The MDIO mux must be registered after the parent MDIO bus,
+ * i.e. after stmmac_dvr_probe().
+ */
+ if (gmac->variant->soc_has_internal_phy) {
+ ret = get_ephy_nodes(priv);
+ if (ret)
+ goto dwmac_remove;
+ ret = sun8i_dwmac_register_mdio_mux(priv);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register mux\n");
+ goto dwmac_mux;
+ }
+ } else {
+ ret = sun8i_dwmac_reset(priv);
+ if (ret)
+ goto dwmac_remove;
+ }
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+dwmac_mux:
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+dwmac_remove:
+ pm_runtime_put_noidle(&pdev->dev);
+ stmmac_dvr_remove(&pdev->dev);
+dwmac_exit:
+ sun8i_dwmac_exit(pdev, gmac);
+dwmac_syscon:
+ sun8i_dwmac_unset_syscon(gmac);
+dwmac_deconfig:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int sun8i_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ if (gmac->variant->soc_has_internal_phy) {
+ mdio_mux_uninit(gmac->mux_handle);
+ sun8i_dwmac_unpower_internal_phy(gmac);
+ reset_control_put(gmac->rst_ephy);
+ clk_put(gmac->ephy_clk);
+ }
+
+ stmmac_pltfr_remove(pdev);
+ sun8i_dwmac_unset_syscon(gmac);
+
+ return 0;
+}
+
+static void sun8i_dwmac_shutdown(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct sunxi_priv_data *gmac = priv->plat->bsp_priv;
+
+ sun8i_dwmac_exit(pdev, gmac);
+}
+
+static const struct of_device_id sun8i_dwmac_match[] = {
+ { .compatible = "allwinner,sun8i-h3-emac",
+ .data = &emac_variant_h3 },
+ { .compatible = "allwinner,sun8i-v3s-emac",
+ .data = &emac_variant_v3s },
+ { .compatible = "allwinner,sun8i-a83t-emac",
+ .data = &emac_variant_a83t },
+ { .compatible = "allwinner,sun8i-r40-gmac",
+ .data = &emac_variant_r40 },
+ { .compatible = "allwinner,sun50i-a64-emac",
+ .data = &emac_variant_a64 },
+ { .compatible = "allwinner,sun50i-h6-emac",
+ .data = &emac_variant_h6 },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun8i_dwmac_match);
+
+static struct platform_driver sun8i_dwmac_driver = {
+ .probe = sun8i_dwmac_probe,
+ .remove = sun8i_dwmac_remove,
+ .shutdown = sun8i_dwmac_shutdown,
+ .driver = {
+ .name = "dwmac-sun8i",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun8i_dwmac_match,
+ },
+};
+module_platform_driver(sun8i_dwmac_driver);
+
+MODULE_AUTHOR("Corentin Labbe <clabbe.montjoie@gmail.com>");
+MODULE_DESCRIPTION("Allwinner sun8i DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
new file mode 100644
index 000000000..fc3b0acc8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sunxi.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * dwmac-sunxi.c - Allwinner sunxi DWMAC specific glue layer
+ *
+ * Copyright (C) 2013 Chen-Yu Tsai
+ *
+ * Chen-Yu Tsai <wens@csie.org>
+ */
+
+#include <linux/stmmac.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+
+#include "stmmac_platform.h"
+
+struct sunxi_priv_data {
+ phy_interface_t interface;
+ int clk_enabled;
+ struct clk *tx_clk;
+ struct regulator *regulator;
+};
+
+#define SUN7I_GMAC_GMII_RGMII_RATE 125000000
+#define SUN7I_GMAC_MII_RATE 25000000
+
+static int sun7i_gmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+ int ret = 0;
+
+ if (gmac->regulator) {
+ ret = regulator_enable(gmac->regulator);
+ if (ret)
+ return ret;
+ }
+
+ /* Set GMAC interface port mode
+ *
+ * The GMAC TX clock lines are configured by setting the clock
+ * rate (which relies on the clock driver's auto-reparenting
+ * feature) and by enabling/disabling the clock.
+ */
+ if (phy_interface_mode_is_rgmii(gmac->interface)) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ ret = clk_prepare(gmac->tx_clk);
+ if (ret && gmac->regulator)
+ regulator_disable(gmac->regulator);
+ }
+
+ return ret;
+}
+
+static void sun7i_gmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (gmac->regulator)
+ regulator_disable(gmac->regulator);
+}
+
+static void sun7i_fix_speed(void *priv, unsigned int speed)
+{
+ struct sunxi_priv_data *gmac = priv;
+
+ /* only GMII mode requires us to reconfigure the clock lines */
+ if (gmac->interface != PHY_INTERFACE_MODE_GMII)
+ return;
+
+ if (gmac->clk_enabled) {
+ clk_disable(gmac->tx_clk);
+ gmac->clk_enabled = 0;
+ }
+ clk_unprepare(gmac->tx_clk);
+
+ if (speed == 1000) {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_GMII_RGMII_RATE);
+ clk_prepare_enable(gmac->tx_clk);
+ gmac->clk_enabled = 1;
+ } else {
+ clk_set_rate(gmac->tx_clk, SUN7I_GMAC_MII_RATE);
+ clk_prepare(gmac->tx_clk);
+ }
+}
+
+static int sun7i_gmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct sunxi_priv_data *gmac;
+ struct device *dev = &pdev->dev;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac) {
+ ret = -ENOMEM;
+ goto err_remove_config_dt;
+ }
+
+ ret = of_get_phy_mode(dev->of_node, &gmac->interface);
+ if (ret && ret != -ENODEV) {
+ dev_err(dev, "Can't get phy-mode\n");
+ goto err_remove_config_dt;
+ }
+
+ gmac->tx_clk = devm_clk_get(dev, "allwinner_gmac_tx");
+ if (IS_ERR(gmac->tx_clk)) {
+ dev_err(dev, "could not get tx clock\n");
+ ret = PTR_ERR(gmac->tx_clk);
+ goto err_remove_config_dt;
+ }
+
+ /* Optional regulator for PHY */
+ gmac->regulator = devm_regulator_get_optional(dev, "phy");
+ if (IS_ERR(gmac->regulator)) {
+ if (PTR_ERR(gmac->regulator) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err_remove_config_dt;
+ }
+ dev_info(dev, "no regulator found\n");
+ gmac->regulator = NULL;
+ }
+
+ /* platform data specifying hardware features and callbacks.
+ * hardware features were copied from Allwinner drivers.
+ */
+ plat_dat->tx_coe = 1;
+ plat_dat->has_gmac = true;
+ plat_dat->bsp_priv = gmac;
+ plat_dat->init = sun7i_gmac_init;
+ plat_dat->exit = sun7i_gmac_exit;
+ plat_dat->fix_mac_speed = sun7i_fix_speed;
+ plat_dat->tx_fifo_size = 4096;
+ plat_dat->rx_fifo_size = 16384;
+
+ ret = sun7i_gmac_init(pdev, plat_dat->bsp_priv);
+ if (ret)
+ goto err_remove_config_dt;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto err_gmac_exit;
+
+ return 0;
+
+err_gmac_exit:
+ sun7i_gmac_exit(pdev, plat_dat->bsp_priv);
+err_remove_config_dt:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static const struct of_device_id sun7i_dwmac_match[] = {
+ { .compatible = "allwinner,sun7i-a20-gmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sun7i_dwmac_match);
+
+static struct platform_driver sun7i_dwmac_driver = {
+ .probe = sun7i_gmac_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "sun7i-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = sun7i_dwmac_match,
+ },
+};
+module_platform_driver(sun7i_dwmac_driver);
+
+MODULE_AUTHOR("Chen-Yu Tsai <wens@csie.org>");
+MODULE_DESCRIPTION("Allwinner sunxi DWMAC specific glue layer");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
new file mode 100644
index 000000000..c3f10a92b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c
@@ -0,0 +1,300 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Toshiba Visconti Ethernet Support
+ *
+ * (C) Copyright 2020 TOSHIBA CORPORATION
+ * (C) Copyright 2020 Toshiba Electronic Devices & Storage Corporation
+ */
+
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+#include <linux/stmmac.h>
+
+#include "stmmac_platform.h"
+#include "dwmac4.h"
+
+#define REG_ETHER_CONTROL 0x52D4
+#define ETHER_ETH_CONTROL_RESET BIT(17)
+
+#define REG_ETHER_CLOCK_SEL 0x52D0
+#define ETHER_CLK_SEL_TX_CLK_EN BIT(0)
+#define ETHER_CLK_SEL_RX_CLK_EN BIT(1)
+#define ETHER_CLK_SEL_RMII_CLK_EN BIT(2)
+#define ETHER_CLK_SEL_RMII_CLK_RST BIT(3)
+#define ETHER_CLK_SEL_DIV_SEL_2 BIT(4)
+#define ETHER_CLK_SEL_DIV_SEL_20 0
+#define ETHER_CLK_SEL_FREQ_SEL_125M (BIT(9) | BIT(8))
+#define ETHER_CLK_SEL_FREQ_SEL_50M BIT(9)
+#define ETHER_CLK_SEL_FREQ_SEL_25M BIT(8)
+#define ETHER_CLK_SEL_FREQ_SEL_2P5M 0
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_IN 0
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC BIT(10)
+#define ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV BIT(11)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_IN 0
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC BIT(12)
+#define ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV BIT(13)
+#define ETHER_CLK_SEL_TX_CLK_O_TX_I 0
+#define ETHER_CLK_SEL_TX_CLK_O_RMII_I BIT(14)
+#define ETHER_CLK_SEL_TX_O_E_N_IN BIT(15)
+#define ETHER_CLK_SEL_RMII_CLK_SEL_IN 0
+#define ETHER_CLK_SEL_RMII_CLK_SEL_RX_C BIT(16)
+
+#define ETHER_CLK_SEL_RX_TX_CLK_EN (ETHER_CLK_SEL_RX_CLK_EN | ETHER_CLK_SEL_TX_CLK_EN)
+
+#define ETHER_CONFIG_INTF_MII 0
+#define ETHER_CONFIG_INTF_RGMII BIT(0)
+#define ETHER_CONFIG_INTF_RMII BIT(2)
+
+struct visconti_eth {
+ void __iomem *reg;
+ u32 phy_intf_sel;
+ struct clk *phy_ref_clk;
+ struct device *dev;
+ spinlock_t lock; /* lock to protect register update */
+};
+
+static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct visconti_eth *dwmac = priv;
+ struct net_device *netdev = dev_get_drvdata(dwmac->dev);
+ unsigned int val, clk_sel_val = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dwmac->lock, flags);
+
+ /* adjust link */
+ val = readl(dwmac->reg + MAC_CTRL_REG);
+ val &= ~(GMAC_CONFIG_PS | GMAC_CONFIG_FES);
+
+ switch (speed) {
+ case SPEED_1000:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
+ break;
+ case SPEED_100:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_25M;
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
+ clk_sel_val = ETHER_CLK_SEL_DIV_SEL_2;
+ val |= GMAC_CONFIG_PS | GMAC_CONFIG_FES;
+ break;
+ case SPEED_10:
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RGMII)
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_2P5M;
+ if (dwmac->phy_intf_sel == ETHER_CONFIG_INTF_RMII)
+ clk_sel_val = ETHER_CLK_SEL_DIV_SEL_20;
+ val |= GMAC_CONFIG_PS;
+ break;
+ default:
+ /* No bit control */
+ netdev_err(netdev, "Unsupported speed request (%d)", speed);
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+ return;
+ }
+
+ writel(val, dwmac->reg + MAC_CTRL_REG);
+
+ /* Stop internal clock */
+ val = readl(dwmac->reg + REG_ETHER_CLOCK_SEL);
+ val &= ~(ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN);
+ val |= ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* Set Clock-Mux, Start clock, Set TX_O direction */
+ switch (dwmac->phy_intf_sel) {
+ case ETHER_CONFIG_INTF_RGMII:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val &= ~ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ break;
+ case ETHER_CONFIG_INTF_RMII:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_DIV |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_DIV | ETHER_CLK_SEL_TX_O_E_N_IN |
+ ETHER_CLK_SEL_RMII_CLK_SEL_RX_C;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_RST;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ break;
+ case ETHER_CONFIG_INTF_MII:
+ default:
+ val = clk_sel_val | ETHER_CLK_SEL_RX_CLK_EXT_SEL_RXC |
+ ETHER_CLK_SEL_TX_CLK_EXT_SEL_TXC | ETHER_CLK_SEL_TX_O_E_N_IN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ val |= ETHER_CLK_SEL_RX_TX_CLK_EN;
+ writel(val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+ break;
+ }
+
+ spin_unlock_irqrestore(&dwmac->lock, flags);
+}
+
+static int visconti_eth_init_hw(struct platform_device *pdev, struct plat_stmmacenet_data *plat_dat)
+{
+ struct visconti_eth *dwmac = plat_dat->bsp_priv;
+ unsigned int reg_val, clk_sel_val;
+
+ switch (plat_dat->phy_interface) {
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RGMII;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_MII;
+ break;
+ case PHY_INTERFACE_MODE_RMII:
+ dwmac->phy_intf_sel = ETHER_CONFIG_INTF_RMII;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported phy-mode (%d)\n", plat_dat->phy_interface);
+ return -EOPNOTSUPP;
+ }
+
+ reg_val = dwmac->phy_intf_sel;
+ writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+
+ /* Enable TX/RX clock */
+ clk_sel_val = ETHER_CLK_SEL_FREQ_SEL_125M;
+ writel(clk_sel_val, dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ writel((clk_sel_val | ETHER_CLK_SEL_RMII_CLK_EN | ETHER_CLK_SEL_RX_TX_CLK_EN),
+ dwmac->reg + REG_ETHER_CLOCK_SEL);
+
+ /* release internal-reset */
+ reg_val |= ETHER_ETH_CONTROL_RESET;
+ writel(reg_val, dwmac->reg + REG_ETHER_CONTROL);
+
+ return 0;
+}
+
+static int visconti_eth_clock_probe(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat_dat)
+{
+ struct visconti_eth *dwmac = plat_dat->bsp_priv;
+ int err;
+
+ dwmac->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
+ if (IS_ERR(dwmac->phy_ref_clk))
+ return dev_err_probe(&pdev->dev, PTR_ERR(dwmac->phy_ref_clk),
+ "phy_ref_clk clock not found.\n");
+
+ err = clk_prepare_enable(dwmac->phy_ref_clk);
+ if (err < 0) {
+ dev_err(&pdev->dev, "failed to enable phy_ref clock: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static int visconti_eth_clock_remove(struct platform_device *pdev)
+{
+ struct visconti_eth *dwmac = get_stmmac_bsp_priv(&pdev->dev);
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ clk_disable_unprepare(dwmac->phy_ref_clk);
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+
+ return 0;
+}
+
+static int visconti_eth_dwmac_probe(struct platform_device *pdev)
+{
+ struct plat_stmmacenet_data *plat_dat;
+ struct stmmac_resources stmmac_res;
+ struct visconti_eth *dwmac;
+ int ret;
+
+ ret = stmmac_get_platform_resources(pdev, &stmmac_res);
+ if (ret)
+ return ret;
+
+ plat_dat = stmmac_probe_config_dt(pdev, stmmac_res.mac);
+ if (IS_ERR(plat_dat))
+ return PTR_ERR(plat_dat);
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac) {
+ ret = -ENOMEM;
+ goto remove_config;
+ }
+
+ spin_lock_init(&dwmac->lock);
+ dwmac->reg = stmmac_res.addr;
+ dwmac->dev = &pdev->dev;
+ plat_dat->bsp_priv = dwmac;
+ plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed;
+
+ ret = visconti_eth_clock_probe(pdev, plat_dat);
+ if (ret)
+ goto remove_config;
+
+ ret = visconti_eth_init_hw(pdev, plat_dat);
+ if (ret)
+ goto remove;
+
+ plat_dat->dma_cfg->aal = 1;
+
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (ret)
+ goto remove;
+
+ return ret;
+
+remove:
+ visconti_eth_clock_remove(pdev);
+remove_config:
+ stmmac_remove_config_dt(pdev, plat_dat);
+
+ return ret;
+}
+
+static int visconti_eth_dwmac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = stmmac_pltfr_remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove platform: %d\n", err);
+
+ err = visconti_eth_clock_remove(pdev);
+ if (err < 0)
+ dev_err(&pdev->dev, "failed to remove clock: %d\n", err);
+
+ stmmac_remove_config_dt(pdev, priv->plat);
+
+ return err;
+}
+
+static const struct of_device_id visconti_eth_dwmac_match[] = {
+ { .compatible = "toshiba,visconti-dwmac" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, visconti_eth_dwmac_match);
+
+static struct platform_driver visconti_eth_dwmac_driver = {
+ .probe = visconti_eth_dwmac_probe,
+ .remove = visconti_eth_dwmac_remove,
+ .driver = {
+ .name = "visconti-eth-dwmac",
+ .of_match_table = visconti_eth_dwmac_match,
+ },
+};
+module_platform_driver(visconti_eth_dwmac_driver);
+
+MODULE_AUTHOR("Toshiba");
+MODULE_DESCRIPTION("Toshiba Visconti Ethernet DWMAC glue driver");
+MODULE_AUTHOR("Nobuhiro Iwamatsu <nobuhiro1.iwamatsu@toshiba.co.jp");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100.h b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
new file mode 100644
index 000000000..7ab791c8d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ MAC 10/100 Header File
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DWMAC100_H__
+#define __DWMAC100_H__
+
+#include <linux/phy.h>
+#include "common.h"
+
+/*----------------------------------------------------------------------------
+ * MAC BLOCK defines
+ *---------------------------------------------------------------------------*/
+/* MAC CSR offset */
+#define MAC_CONTROL 0x00000000 /* MAC Control */
+#define MAC_ADDR_HIGH 0x00000004 /* MAC Address High */
+#define MAC_ADDR_LOW 0x00000008 /* MAC Address Low */
+#define MAC_HASH_HIGH 0x0000000c /* Multicast Hash Table High */
+#define MAC_HASH_LOW 0x00000010 /* Multicast Hash Table Low */
+#define MAC_MII_ADDR 0x00000014 /* MII Address */
+#define MAC_MII_DATA 0x00000018 /* MII Data */
+#define MAC_FLOW_CTRL 0x0000001c /* Flow Control */
+#define MAC_VLAN1 0x00000020 /* VLAN1 Tag */
+#define MAC_VLAN2 0x00000024 /* VLAN2 Tag */
+
+/* MAC CTRL defines */
+#define MAC_CONTROL_RA 0x80000000 /* Receive All Mode */
+#define MAC_CONTROL_BLE 0x40000000 /* Endian Mode */
+#define MAC_CONTROL_HBD 0x10000000 /* Heartbeat Disable */
+#define MAC_CONTROL_PS 0x08000000 /* Port Select */
+#define MAC_CONTROL_DRO 0x00800000 /* Disable Receive Own */
+#define MAC_CONTROL_EXT_LOOPBACK 0x00400000 /* Reserved (ext loopback?) */
+#define MAC_CONTROL_OM 0x00200000 /* Loopback Operating Mode */
+#define MAC_CONTROL_F 0x00100000 /* Full Duplex Mode */
+#define MAC_CONTROL_PM 0x00080000 /* Pass All Multicast */
+#define MAC_CONTROL_PR 0x00040000 /* Promiscuous Mode */
+#define MAC_CONTROL_IF 0x00020000 /* Inverse Filtering */
+#define MAC_CONTROL_PB 0x00010000 /* Pass Bad Frames */
+#define MAC_CONTROL_HO 0x00008000 /* Hash Only Filtering Mode */
+#define MAC_CONTROL_HP 0x00002000 /* Hash/Perfect Filtering Mode */
+#define MAC_CONTROL_LCC 0x00001000 /* Late Collision Control */
+#define MAC_CONTROL_DBF 0x00000800 /* Disable Broadcast Frames */
+#define MAC_CONTROL_DRTY 0x00000400 /* Disable Retry */
+#define MAC_CONTROL_ASTP 0x00000100 /* Automatic Pad Stripping */
+#define MAC_CONTROL_BOLMT_10 0x00000000 /* Back Off Limit 10 */
+#define MAC_CONTROL_BOLMT_8 0x00000040 /* Back Off Limit 8 */
+#define MAC_CONTROL_BOLMT_4 0x00000080 /* Back Off Limit 4 */
+#define MAC_CONTROL_BOLMT_1 0x000000c0 /* Back Off Limit 1 */
+#define MAC_CONTROL_DC 0x00000020 /* Deferral Check */
+#define MAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define MAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define MAC_CORE_INIT (MAC_CONTROL_HBD)
+
+/* MAC FLOW CTRL defines */
+#define MAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define MAC_FLOW_CTRL_PT_SHIFT 16
+#define MAC_FLOW_CTRL_PASS 0x00000004 /* Pass Control Frames */
+#define MAC_FLOW_CTRL_ENABLE 0x00000002 /* Flow Control Enable */
+#define MAC_FLOW_CTRL_PAUSE 0x00000001 /* Flow Control Busy ... */
+
+/* MII ADDR defines */
+#define MAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define MAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+
+/*----------------------------------------------------------------------------
+ * DMA BLOCK defines
+ *---------------------------------------------------------------------------*/
+
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DBO 0x00100000 /* Descriptor Byte Ordering */
+#define DMA_BUS_MODE_BLE 0x00000080 /* Big Endian/Little Endian */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+#define DMA_BUS_MODE_BAR_BUS 0x00000002 /* Bar-Bus Arbitration */
+#define DMA_BUS_MODE_DEFAULT 0x00000000
+
+/* DMA Control register defines */
+#define DMA_CONTROL_SF 0x00200000 /* Store And Forward */
+
+/* Transmit Threshold Control */
+enum ttc_control {
+ DMA_CONTROL_TTC_DEFAULT = 0x00000000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_64 = 0x00004000, /* Threshold is 64 DWORDS */
+ DMA_CONTROL_TTC_128 = 0x00008000, /* Threshold is 128 DWORDS */
+ DMA_CONTROL_TTC_256 = 0x0000c000, /* Threshold is 256 DWORDS */
+ DMA_CONTROL_TTC_18 = 0x00400000, /* Threshold is 18 DWORDS */
+ DMA_CONTROL_TTC_24 = 0x00404000, /* Threshold is 24 DWORDS */
+ DMA_CONTROL_TTC_32 = 0x00408000, /* Threshold is 32 DWORDS */
+ DMA_CONTROL_TTC_40 = 0x0040c000, /* Threshold is 40 DWORDS */
+ DMA_CONTROL_SE = 0x00000008, /* Stop On Empty */
+ DMA_CONTROL_OSF = 0x00000004, /* Operate On 2nd Frame */
+};
+
+/* STMAC110 DMA Missed Frame Counter register defines */
+#define DMA_MISSED_FRAME_OVE 0x10000000 /* FIFO Overflow */
+#define DMA_MISSED_FRAME_OVE_CNTR 0x0ffe0000 /* Overflow Frame Counter */
+#define DMA_MISSED_FRAME_OVE_M 0x00010000 /* Missed Frame Overflow */
+#define DMA_MISSED_FRAME_M_CNTR 0x0000ffff /* Missed Frame Counter */
+
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
+
+#endif /* __DWMAC100_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
new file mode 100644
index 000000000..4296ddda8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+#ifndef __DWMAC1000_H__
+#define __DWMAC1000_H__
+
+#include <linux/phy.h>
+#include "common.h"
+
+#define GMAC_CONTROL 0x00000000 /* Configuration */
+#define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */
+#define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */
+#define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */
+#define GMAC_MII_ADDR 0x00000010 /* MII Address */
+#define GMAC_MII_DATA 0x00000014 /* MII Data */
+#define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */
+#define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */
+#define GMAC_DEBUG 0x00000024 /* GMAC debug register */
+#define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */
+
+#define GMAC_INT_STATUS 0x00000038 /* interrupt status register */
+#define GMAC_INT_STATUS_PMT BIT(3)
+#define GMAC_INT_STATUS_MMCIS BIT(4)
+#define GMAC_INT_STATUS_MMCRIS BIT(5)
+#define GMAC_INT_STATUS_MMCTIS BIT(6)
+#define GMAC_INT_STATUS_MMCCSUM BIT(7)
+#define GMAC_INT_STATUS_TSTAMP BIT(9)
+#define GMAC_INT_STATUS_LPIIS BIT(10)
+
+/* interrupt mask register */
+#define GMAC_INT_MASK 0x0000003c
+#define GMAC_INT_DISABLE_RGMII BIT(0)
+#define GMAC_INT_DISABLE_PCSLINK BIT(1)
+#define GMAC_INT_DISABLE_PCSAN BIT(2)
+#define GMAC_INT_DISABLE_PMT BIT(3)
+#define GMAC_INT_DISABLE_TIMESTAMP BIT(9)
+#define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \
+ GMAC_INT_DISABLE_PCSLINK | \
+ GMAC_INT_DISABLE_PCSAN)
+#define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \
+ GMAC_INT_DISABLE_PCS)
+
+/* PMT Control and Status */
+#define GMAC_PMT 0x0000002c
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
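+/* Illustrative only (an assumption about typical usage, mirroring other
+ * stmmac cores): a power-down with magic-packet wake-up combines these
+ * event bits, e.g.
+ *
+ *	writel(power_down | magic_pkt_en, ioaddr + GMAC_PMT);
+ */
+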
+/* Energy Efficient Ethernet (EEE)
+ *
+ * LPI status, timer and control register offset
+ */
+#define LPI_CTRL_STATUS 0x0030
+#define LPI_TIMER_CTRL 0x0034
+
+/* LPI control and status defines */
+#define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */
+#define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */
+#define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */
+#define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */
+#define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */
+#define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */
+#define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */
+#define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */
+#define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */
+#define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */
+
+/* GMAC HW ADDR regs */
+#define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \
+ 0x00000040 + (reg * 8))
+#define GMAC_ADDR_LOW(reg) ((reg > 15) ? 0x00000804 + (reg - 16) * 8 : \
+ 0x00000044 + (reg * 8))
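+/* Example: GMAC_ADDR_HIGH(0) is 0x40 and GMAC_ADDR_LOW(0) is 0x44, while
+ * slots above 15 move to the extended region, e.g. GMAC_ADDR_HIGH(16) is
+ * 0x800 and GMAC_ADDR_LOW(16) is 0x804.
+ */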
+#define GMAC_MAX_PERFECT_ADDRESSES 1
+
+#define GMAC_PCS_BASE 0x000000c0 /* PCS register base */
+#define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */
+
+/* SGMII/RGMII status register */
+#define GMAC_RGSMIIIS_LNKMODE BIT(0)
+#define GMAC_RGSMIIIS_SPEED GENMASK(2, 1)
+#define GMAC_RGSMIIIS_SPEED_SHIFT 1
+#define GMAC_RGSMIIIS_LNKSTS BIT(3)
+#define GMAC_RGSMIIIS_JABTO BIT(4)
+#define GMAC_RGSMIIIS_FALSECARDET BIT(5)
+#define GMAC_RGSMIIIS_SMIDRXS BIT(16)
+/* LNKMOD */
+#define GMAC_RGSMIIIS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_RGSMIIIS_SPEED_125 0x2
+#define GMAC_RGSMIIIS_SPEED_25 0x1
+#define GMAC_RGSMIIIS_SPEED_2_5 0x0
+
+/* GMAC Configuration defines */
+#define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */
+#define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */
+#define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */
+#define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */
+#define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */
+#define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */
+enum inter_frame_gap {
+ GMAC_CONTROL_IFG_88 = 0x00040000,
+ GMAC_CONTROL_IFG_80 = 0x00020000,
+ GMAC_CONTROL_IFG_40 = 0x000e0000,
+};
+#define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */
+#define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMII 1:MII */
+#define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */
+#define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */
+#define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */
+#define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */
+#define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */
+#define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */
+#define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */
+#define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */
+#define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */
+#define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */
+#define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */
+
+#define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \
+ GMAC_CONTROL_BE | GMAC_CONTROL_DCRS)
+
+/* GMAC Frame Filter defines */
+#define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */
+#define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */
+#define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */
+#define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */
+#define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */
+#define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */
+#define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */
+#define GMAC_FRAME_FILTER_SAIF 0x00000100 /* Inverse Filtering */
+#define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */
+#define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */
+#define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */
+/* GMII ADDR defines */
+#define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */
+#define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */
+/* GMAC FLOW CTRL defines */
+#define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */
+#define GMAC_FLOW_CTRL_PT_SHIFT 16
+#define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */
+#define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */
+#define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */
+#define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
+
+/* DEBUG Register defines */
+/* MTL TxStatus FIFO */
+#define GMAC_DEBUG_TXSTSFSTS BIT(25) /* MTL TxStatus FIFO Full Status */
+#define GMAC_DEBUG_TXFSTS BIT(24) /* MTL Tx FIFO Not Empty Status */
+#define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */
+/* MTL Tx FIFO Read Controller Status */
+#define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20)
+#define GMAC_DEBUG_TRCSTS_SHIFT 20
+#define GMAC_DEBUG_TRCSTS_IDLE 0
+#define GMAC_DEBUG_TRCSTS_READ 1
+#define GMAC_DEBUG_TRCSTS_TXW 2
+#define GMAC_DEBUG_TRCSTS_WRITE 3
+#define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */
+/* MAC Transmit Frame Controller Status */
+#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
+#define GMAC_DEBUG_TFCSTS_SHIFT 17
+#define GMAC_DEBUG_TFCSTS_IDLE 0
+#define GMAC_DEBUG_TFCSTS_WAIT 1
+#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
+#define GMAC_DEBUG_TFCSTS_XFER 3
+/* MAC GMII or MII Transmit Protocol Engine Status */
+#define GMAC_DEBUG_TPESTS BIT(16)
+#define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */
+#define GMAC_DEBUG_RXFSTS_SHIFT 8
+#define GMAC_DEBUG_RXFSTS_EMPTY 0
+#define GMAC_DEBUG_RXFSTS_BT 1
+#define GMAC_DEBUG_RXFSTS_AT 2
+#define GMAC_DEBUG_RXFSTS_FULL 3
+#define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */
+#define GMAC_DEBUG_RRCSTS_SHIFT 5
+#define GMAC_DEBUG_RRCSTS_IDLE 0
+#define GMAC_DEBUG_RRCSTS_RDATA 1
+#define GMAC_DEBUG_RRCSTS_RSTAT 2
+#define GMAC_DEBUG_RRCSTS_FLUSH 3
+#define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */
+/* MAC Receive Frame Controller FIFO Status */
+#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
+#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
+/* MAC GMII or MII Receive Protocol Engine Status */
+#define GMAC_DEBUG_RPESTS BIT(0)
+
+/*--- DMA BLOCK defines ---*/
+/* DMA Bus Mode register defines */
+#define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */
+#define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */
+#define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */
+/* Programmable burst length (passed through platform) */
+#define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */
+#define DMA_BUS_MODE_PBL_SHIFT 8
+#define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */
+
+enum rx_tx_priority_ratio {
+ double_ratio = 0x00004000, /* 2:1 */
+ triple_ratio = 0x00008000, /* 3:1 */
+ quadruple_ratio = 0x0000c000, /* 4:1 */
+};
+
+#define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */
+#define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */
+#define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */
+#define DMA_BUS_MODE_RPBL_SHIFT 17
+#define DMA_BUS_MODE_USP 0x00800000
+#define DMA_BUS_MODE_MAXPBL 0x01000000
+#define DMA_BUS_MODE_AAL 0x02000000
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */
+#define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */
+/* DMA Bus Mode register defines */
+#define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */
+#define DMA_BUS_PR_RATIO_SHIFT 14
+#define DMA_BUS_FB 0x00010000 /* Fixed Burst */
+
+/* DMA operation mode defines (start/stop tx/rx are placed in common header)*/
+/* Disable Drop TCP/IP csum error */
+#define DMA_CONTROL_DT 0x04000000
+#define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */
+#define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */
+/* Threshold for Activating the FC */
+enum rfa {
+ act_full_minus_1 = 0x00800000,
+ act_full_minus_2 = 0x00800200,
+ act_full_minus_3 = 0x00800400,
+ act_full_minus_4 = 0x00800600,
+};
+/* Threshold for Deactivating the FC */
+enum rfd {
+ deac_full_minus_1 = 0x00400000,
+ deac_full_minus_2 = 0x00400800,
+ deac_full_minus_3 = 0x00401000,
+ deac_full_minus_4 = 0x00401800,
+};
+#define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */
+
+enum ttc_control {
+ DMA_CONTROL_TTC_64 = 0x00000000,
+ DMA_CONTROL_TTC_128 = 0x00004000,
+ DMA_CONTROL_TTC_192 = 0x00008000,
+ DMA_CONTROL_TTC_256 = 0x0000c000,
+ DMA_CONTROL_TTC_40 = 0x00010000,
+ DMA_CONTROL_TTC_32 = 0x00014000,
+ DMA_CONTROL_TTC_24 = 0x00018000,
+ DMA_CONTROL_TTC_16 = 0x0001c000,
+};
+#define DMA_CONTROL_TC_TX_MASK 0xfffe3fff
+
+#define DMA_CONTROL_EFC 0x00000100
+#define DMA_CONTROL_FEF 0x00000080
+#define DMA_CONTROL_FUF 0x00000040
+
+/* Receive flow control activation field
+ * RFA field in DMA control register, bits 23,10:9
+ */
+#define DMA_CONTROL_RFA_MASK 0x00800600
+
+/* Receive flow control deactivation field
+ * RFD field in DMA control register, bits 22,12:11
+ */
+#define DMA_CONTROL_RFD_MASK 0x00401800
+
+/* RFD and RFA fields are encoded as follows
+ *
+ * Bit Field
+ * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled)
+ * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled)
+ * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled)
+ * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled)
+ * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled)
+ * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled)
+ * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled)
+ * 1,11 - Reserved
+ *
+ * RFD should always be > RFA for a given FIFO size. RFD == RFA may work,
+ * but packet throughput performance may not be as expected.
+ *
+ * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame
+ * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause
+ * Description).
+ *
+ * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6),
+ * is set to 0. This allows pause frames with a quanta of 0 to be sent
+ * as an XOFF message to the link peer.
+ */
+
+#define RFA_FULL_MINUS_1K 0x00000000
+#define RFA_FULL_MINUS_2K 0x00000200
+#define RFA_FULL_MINUS_3K 0x00000400
+#define RFA_FULL_MINUS_4K 0x00000600
+#define RFA_FULL_MINUS_5K 0x00800000
+#define RFA_FULL_MINUS_6K 0x00800200
+#define RFA_FULL_MINUS_7K 0x00800400
+
+#define RFD_FULL_MINUS_1K 0x00000000
+#define RFD_FULL_MINUS_2K 0x00000800
+#define RFD_FULL_MINUS_3K 0x00001000
+#define RFD_FULL_MINUS_4K 0x00001800
+#define RFD_FULL_MINUS_5K 0x00400000
+#define RFD_FULL_MINUS_6K 0x00400800
+#define RFD_FULL_MINUS_7K 0x00401000
+
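+/* Example (illustrative only): with EFC enabled on a large (> 8KB) Rx FIFO,
+ * a hysteresis of "activate at full minus 3KB, deactivate at full minus 7KB"
+ * would be programmed as:
+ *
+ *	csr6 &= ~(DMA_CONTROL_RFA_MASK | DMA_CONTROL_RFD_MASK);
+ *	csr6 |= DMA_CONTROL_EFC | RFA_FULL_MINUS_3K | RFD_FULL_MINUS_7K;
+ */
+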
+enum rtc_control {
+ DMA_CONTROL_RTC_64 = 0x00000000,
+ DMA_CONTROL_RTC_32 = 0x00000008,
+ DMA_CONTROL_RTC_96 = 0x00000010,
+ DMA_CONTROL_RTC_128 = 0x00000018,
+};
+#define DMA_CONTROL_TC_RX_MASK 0xffffffe7
+
+#define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */
+
+/* MMC registers offset */
+#define GMAC_MMC_CTRL 0x100
+#define GMAC_MMC_RX_INTR 0x104
+#define GMAC_MMC_TX_INTR 0x108
+#define GMAC_MMC_RX_CSUM_OFFLOAD 0x208
+#define GMAC_EXTHASH_BASE 0x500
+
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+#endif /* __DWMAC1000_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
new file mode 100644
index 000000000..0e00dd83d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -0,0 +1,556 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <asm/io.h>
+#include "stmmac.h"
+#include "stmmac_pcs.h"
+#include "dwmac1000.h"
+
+static void dwmac1000_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+ int mtu = dev->mtu;
+
+ /* Configure GMAC core */
+ value |= GMAC_CORE_INIT;
+
+ if (mtu > 1500)
+ value |= GMAC_CONTROL_2K;
+ if (mtu > 2000)
+ value |= GMAC_CONTROL_JE;
+
+ if (hw->ps) {
+ value |= GMAC_CONTROL_TE;
+
+ value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ /* Mask GMAC interrupts */
+ value = GMAC_INT_DEFAULT_MASK;
+
+ if (hw->pcs)
+ value &= ~GMAC_INT_DISABLE_PCS;
+
+ writel(value, ioaddr + GMAC_INT_MASK);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Tag detection without filtering */
+ writel(0x0, ioaddr + GMAC_VLAN_TAG);
+#endif
+}
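+
+/* Example (illustrative): dev->mtu == 1600 sets only GMAC_CONTROL_2K
+ * (frames up to 2 KiB), while dev->mtu == 3000 additionally sets
+ * GMAC_CONTROL_JE to enable jumbo frames.
+ */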
+
+static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ if (hw->rx_csum)
+ value |= GMAC_CONTROL_IPC;
+ else
+ value &= ~GMAC_CONTROL_IPC;
+
+ writel(value, ioaddr + GMAC_CONTROL);
+
+ value = readl(ioaddr + GMAC_CONTROL);
+
+ return !!(value & GMAC_CONTROL_IPC);
+}
+
+static void dwmac1000_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < 55; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
+static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
+ const unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
+ int mcbitslog2)
+{
+ int numhashregs, regs;
+
+ switch (mcbitslog2) {
+ case 6:
+ writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW);
+ writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH);
+ return;
+ case 7:
+ numhashregs = 4;
+ break;
+ case 8:
+ numhashregs = 8;
+ break;
+ default:
+ pr_debug("STMMAC: err in setting multicast filter\n");
+ return;
+ }
+ for (regs = 0; regs < numhashregs; regs++)
+ writel(mcfilterbits[regs],
+ ioaddr + GMAC_EXTHASH_BASE + regs * 4);
+}
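+
+/* Example (illustrative): mcbitslog2 == 6 selects the classic 64-bin
+ * HI/LO register pair; mcbitslog2 == 8 selects 256 bins spread across
+ * eight 32-bit words starting at GMAC_EXTHASH_BASE.
+ */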
+
+static void dwmac1000_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ unsigned int value = 0;
+ unsigned int perfect_addr_number = hw->unicast_filter_entries;
+ u32 mc_filter[8];
+ int mcbitslog2 = hw->mcast_bits_log2;
+
+ pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
+ netdev_mc_count(dev), netdev_uc_count(dev));
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+
+ if (dev->flags & IFF_PROMISC) {
+ value = GMAC_FRAME_FILTER_PR | GMAC_FRAME_FILTER_PCF;
+ } else if (dev->flags & IFF_ALLMULTI) {
+ value = GMAC_FRAME_FILTER_PM; /* pass all multi */
+ } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
+ /* Fall back to all multicast if we've no filter */
+ value = GMAC_FRAME_FILTER_PM;
+ } else if (!netdev_mc_empty(dev)) {
+ struct netdev_hw_addr *ha;
+
+ /* Hash filter for multicast */
+ value = GMAC_FRAME_FILTER_HMC;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper n bits of the calculated CRC are used to
+ * index the contents of the hash table. The number of
+ * bits used depends on the hardware configuration
+ * selected at core configuration time.
+ */
+ int bit_nr = bitrev32(~crc32_le(~0, ha->addr,
+ ETH_ALEN)) >>
+ (32 - mcbitslog2);
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ }
+
+ value |= GMAC_FRAME_FILTER_HPF;
+ dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2);
+
+ /* Handle multiple unicast addresses (perfect filtering) */
+ if (netdev_uc_count(dev) > perfect_addr_number)
+		/* Switch to promiscuous mode if more unicast
+		 * addresses are requested than the hardware supports.
+		 */
+ value |= GMAC_FRAME_FILTER_PR;
+ else {
+ int reg = 1;
+ struct netdev_hw_addr *ha;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ stmmac_set_mac_addr(ioaddr, ha->addr,
+ GMAC_ADDR_HIGH(reg),
+ GMAC_ADDR_LOW(reg));
+ reg++;
+ }
+
+ while (reg < perfect_addr_number) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
+ }
+
+#ifdef FRAME_FILTER_DEBUG
+ /* Enable Receive all mode (to debug filtering_fail errors) */
+ value |= GMAC_FRAME_FILTER_RA;
+#endif
+ writel(value, ioaddr + GMAC_FRAME_FILTER);
+}
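+
+/* Illustrative sketch of the hash-bit computation used above, isolated
+ * for clarity. For a table of 2^mcbitslog2 bins:
+ *
+ *	bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >> (32 - mcbitslog2);
+ *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ *
+ * i.e. the top mcbitslog2 bits of the bit-reversed little-endian CRC-32
+ * pick one bit across the array of 32-bit filter words.
+ */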
+
+
+static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+	/* Set flow such that DZPQ in MAC Register 6 is 0,
+	 * and unicast pause detect is enabled.
+	 */
+ unsigned int flow = GMAC_FLOW_CTRL_UP;
+
+ pr_debug("GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ pr_debug("\tReceive Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_RFE;
+ }
+ if (fc & FLOW_TX) {
+ pr_debug("\tTransmit Flow-Control ON\n");
+ flow |= GMAC_FLOW_CTRL_TFE;
+ }
+
+ if (duplex) {
+ pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+ flow |= (pause_time << GMAC_FLOW_CTRL_PT_SHIFT);
+ }
+
+ writel(flow, ioaddr + GMAC_FLOW_CTRL);
+}
+
+static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int pmt = 0;
+
+ if (mode & WAKE_MAGIC) {
+ pr_debug("GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ }
+ if (mode & WAKE_UCAST) {
+ pr_debug("GMAC: WOL on global unicast\n");
+ pmt |= power_down | global_unicast | wake_up_frame_en;
+ }
+
+ writel(pmt, ioaddr + GMAC_PMT);
+}
+
+/* RGMII or SMII interface */
+static void dwmac1000_rgsmii(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 status;
+
+ status = readl(ioaddr + GMAC_RGSMIIIS);
+ x->irq_rgmii_n++;
+
+ /* Check the link status */
+ if (status & GMAC_RGSMIIIS_LNKSTS) {
+ int speed_value;
+
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_RGSMIIIS_SPEED) >>
+ GMAC_RGSMIIIS_SPEED_SHIFT);
+ if (speed_value == GMAC_RGSMIIIS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_RGSMIIIS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_duplex = (status & GMAC_RGSMIIIS_LNKMOD_MASK);
+
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
+}
+
+static int dwmac1000_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_mask = readl(ioaddr + GMAC_INT_MASK);
+ int ret = 0;
+
+ /* Discard masked bits */
+ intr_status &= ~intr_mask;
+
+	/* Events not used by the driver (e.g. MMC interrupts) are only counted. */
+	if (intr_status & GMAC_INT_STATUS_MMCTIS)
+ x->mmc_tx_irq_n++;
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCRIS))
+ x->mmc_rx_irq_n++;
+ if (unlikely(intr_status & GMAC_INT_STATUS_MMCCSUM))
+ x->mmc_rx_csum_offload_irq_n++;
+ if (unlikely(intr_status & GMAC_INT_DISABLE_PMT)) {
+ /* clear the PMT bits 5 and 6 by reading the PMT status reg */
+ readl(ioaddr + GMAC_PMT);
+ x->irq_receive_pmt_irq_n++;
+ }
+
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & GMAC_INT_STATUS_LPIIS) {
+		/* Clear the LPI interrupt by reading register 12 */
+ ret = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (ret & LPI_CTRL_STATUS_TLPIEN)
+ x->irq_tx_path_in_lpi_mode_n++;
+ if (ret & LPI_CTRL_STATUS_TLPIEX)
+ x->irq_tx_path_exit_lpi_mode_n++;
+ if (ret & LPI_CTRL_STATUS_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (ret & LPI_CTRL_STATUS_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac1000_rgsmii(ioaddr, x);
+
+ return ret;
+}
+
+static void dwmac1000_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+	/* TODO: handle en_tx_lpi_clockgating */
+
+	/* Enable the link status receive on the RGMII, SGMII or SMII
+	 * receive path and instruct the transmitter to enter the LPI
+	 * state.
+	 */
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value |= LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA;
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+ value &= ~(LPI_CTRL_STATUS_LPIEN | LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + LPI_CTRL_STATUS);
+
+ if (link)
+ value |= LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + LPI_CTRL_STATUS);
+}
+
+static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + LPI_TIMER_CTRL);
+}
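+
+/* Example (illustrative values): dwmac1000_set_eee_timer(hw, 1000, 30)
+ * packs LS = 1000 (0x3e8) and TW = 30 (0x1e) as (LS << 16) | TW and
+ * writes 0x03e8001e to LPI_TIMER_CTRL.
+ */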
+
+static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
+{
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
+}
+
+static void dwmac1000_rane(void __iomem *ioaddr, bool restart)
+{
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
+
+static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+}
+
+static void dwmac1000_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
+{
+ u32 value = readl(ioaddr + GMAC_DEBUG);
+
+ if (value & GMAC_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & GMAC_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & GMAC_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & GMAC_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & GMAC_DEBUG_TRCSTS_MASK)
+ >> GMAC_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == GMAC_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == GMAC_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == GMAC_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
+ if (value & GMAC_DEBUG_TFCSTS_MASK) {
+ u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+ >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+ if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+ x->mac_tx_frame_ctrl_xfer++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+ x->mac_tx_frame_ctrl_pause++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+ x->mac_tx_frame_ctrl_wait++;
+ else
+ x->mac_tx_frame_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_TPESTS)
+ x->mac_gmii_tx_proto_engine++;
+ if (value & GMAC_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & GMAC_DEBUG_RXFSTS_MASK)
+ >> GMAC_DEBUG_RRCSTS_SHIFT;
+
+ if (rxfsts == GMAC_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == GMAC_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == GMAC_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & GMAC_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & GMAC_DEBUG_RRCSTS_MASK) >>
+ GMAC_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == GMAC_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == GMAC_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else if (rrcsts == GMAC_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
+ if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ if (value & GMAC_DEBUG_RPESTS)
+ x->mac_gmii_rx_proto_engine++;
+}
+
+static void dwmac1000_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONTROL);
+
+ if (enable)
+ value |= GMAC_CONTROL_LM;
+ else
+ value &= ~GMAC_CONTROL_LM;
+
+ writel(value, ioaddr + GMAC_CONTROL);
+}
+
+const struct stmmac_ops dwmac1000_ops = {
+ .core_init = dwmac1000_core_init,
+ .set_mac = stmmac_set_mac,
+ .rx_ipc = dwmac1000_rx_ipc_enable,
+ .dump_regs = dwmac1000_dump_regs,
+ .host_irq_status = dwmac1000_irq_status,
+ .set_filter = dwmac1000_set_filter,
+ .flow_ctrl = dwmac1000_flow_ctrl,
+ .pmt = dwmac1000_pmt,
+ .set_umac_addr = dwmac1000_set_umac_addr,
+ .get_umac_addr = dwmac1000_get_umac_addr,
+ .set_eee_mode = dwmac1000_set_eee_mode,
+ .reset_eee_mode = dwmac1000_reset_eee_mode,
+ .set_eee_timer = dwmac1000_set_eee_timer,
+ .set_eee_pls = dwmac1000_set_eee_pls,
+ .debug = dwmac1000_debug,
+ .pcs_ctrl_ane = dwmac1000_ctrl_ane,
+ .pcs_rane = dwmac1000_rane,
+ .pcs_get_adv_lp = dwmac1000_get_adv_lp,
+ .set_mac_loopback = dwmac1000_set_mac_loopback,
+};
+
+int dwmac1000_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tDWMAC1000\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = GMAC_CONTROL_DM;
+ mac->link.speed10 = GMAC_CONTROL_PS;
+ mac->link.speed100 = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = GMAC_CONTROL_PS | GMAC_CONTROL_FES;
+ mac->mii.addr = GMAC_MII_ADDR;
+ mac->mii.data = GMAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
new file mode 100644
index 000000000..f5581db0b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ DWC Ether MAC 10/100/1000 Universal version 3.41a has been used for
+ developing this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <asm/io.h>
+#include "dwmac1000.h"
+#include "dwmac_dma.h"
+
+static void dwmac1000_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + DMA_AXI_BUS_MODE);
+ int i;
+
+ pr_info("dwmac1000: Master AXI performs %s burst length\n",
+ !(value & DMA_AXI_UNDEF) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value &= ~DMA_AXI_WR_OSR_LMT;
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_WR_OSR_LMT_MASK) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value &= ~DMA_AXI_RD_OSR_LMT;
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_RD_OSR_LMT_MASK) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+ /* Depending on the UNDEF bit the Master AXI will perform any burst
+ * length according to the BLEN programmed (by default all BLEN are
+ * set).
+ */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
+ break;
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + DMA_AXI_BUS_MODE);
+}
+
+static void dwmac1000_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+ int txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ int rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+
+	/*
+	 * Set the DMA PBL (Programmable Burst Length) mode.
+	 *
+	 * Note: before stmmac core 3.50 this mode bit selected 4xPBL;
+	 * from 3.50 onwards it acts as 8xPBL.
+	 */
+ if (dma_cfg->pblx8)
+ value |= DMA_BUS_MODE_MAXPBL;
+ value |= DMA_BUS_MODE_USP;
+ value &= ~(DMA_BUS_MODE_PBL_MASK | DMA_BUS_MODE_RPBL_MASK);
+ value |= (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+ value |= (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+
+ /* Set the Fixed burst mode */
+ if (dma_cfg->fixed_burst)
+ value |= DMA_BUS_MODE_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (dma_cfg->mixed_burst)
+ value |= DMA_BUS_MODE_MB;
+
+ if (atds)
+ value |= DMA_BUS_MODE_ATDS;
+
+ if (dma_cfg->aal)
+ value |= DMA_BUS_MODE_AAL;
+
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
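+
+/* Example (illustrative): with dma_cfg->pbl = 8 and pblx8 set, a
+ * post-3.50 core bursts up to 8 x 8 = 64 beats per DMA transaction;
+ * on a pre-3.50 core the same MAXPBL bit meant 4xPBL, i.e. 32 beats.
+ */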
+
+static void dwmac1000_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base address list must be written into DMA CSR3 */
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac1000_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base address list must be written into DMA CSR4 */
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+}
+
+static u32 dwmac1000_configure_fc(u32 csr6, int rxfifosz)
+{
+ csr6 &= ~DMA_CONTROL_RFA_MASK;
+ csr6 &= ~DMA_CONTROL_RFD_MASK;
+
+ /* Leave flow control disabled if receive fifo size is less than
+ * 4K or 0. Otherwise, send XOFF when fifo is 1K less than full,
+ * and send XON when 2K less than full.
+ */
+ if (rxfifosz < 4096) {
+ csr6 &= ~DMA_CONTROL_EFC;
+ pr_debug("GMAC: disabling flow control, rxfifo too small(%d)\n",
+ rxfifosz);
+ } else {
+ csr6 |= DMA_CONTROL_EFC;
+ csr6 |= RFA_FULL_MINUS_1K;
+ csr6 |= RFD_FULL_MINUS_2K;
+ }
+ return csr6;
+}
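+
+/* Example (illustrative): for an 8 KB RX FIFO this enables EFC and
+ * asserts flow control (XOFF) once only 1 KB of FIFO space is left,
+ * deasserting it (XON) again when free space grows back to 2 KB.
+ */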
+
+static void dwmac1000_dma_operation_mode_rx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ csr6 |= DMA_CONTROL_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ csr6 &= ~DMA_CONTROL_RSF;
+ csr6 &= DMA_CONTROL_TC_RX_MASK;
+ if (mode <= 32)
+ csr6 |= DMA_CONTROL_RTC_32;
+ else if (mode <= 64)
+ csr6 |= DMA_CONTROL_RTC_64;
+ else if (mode <= 96)
+ csr6 |= DMA_CONTROL_RTC_96;
+ else
+ csr6 |= DMA_CONTROL_RTC_128;
+ }
+
+ /* Configure flow control based on rx fifo size */
+ csr6 = dwmac1000_configure_fc(csr6, fifosz);
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ csr6 |= DMA_CONTROL_TSF;
+		/* Operating on the second frame increases performance,
+		 * especially when transmit store-and-forward is used.
+		 */
+ csr6 |= DMA_CONTROL_OSF;
+ } else {
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
+ csr6 &= ~DMA_CONTROL_TSF;
+ csr6 &= DMA_CONTROL_TC_TX_MASK;
+ /* Set the transmit threshold */
+ if (mode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (mode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else if (mode <= 128)
+ csr6 |= DMA_CONTROL_TTC_128;
+ else if (mode <= 192)
+ csr6 |= DMA_CONTROL_TTC_192;
+ else
+ csr6 |= DMA_CONTROL_TTC_256;
+ }
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac1000_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = 0; i < NUM_DWMAC1000_DMA_REGS; i++)
+ if ((i < 12) || (i > 17))
+ reg_space[DMA_BUS_MODE / 4 + i] =
+ readl(ioaddr + DMA_BUS_MODE + i * 4);
+}
+
+static int dwmac1000_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap = readl(ioaddr + DMA_HW_FEATURE);
+
+ if (!hw_cap) {
+ /* 0x00000000 is the value read on old hardware that does not
+ * implement this register
+ */
+ return -EOPNOTSUPP;
+ }
+
+ dma_cap->mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
+ dma_cap->hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
+ dma_cap->multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
+ dma_cap->pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
+ dma_cap->sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
+ dma_cap->pmt_remote_wake_up = (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
+ dma_cap->pmt_magic_frame = (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp =
+ (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
+ dma_cap->av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
+ dma_cap->rx_coe_type1 = (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
+ dma_cap->rx_coe_type2 = (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
+ dma_cap->rxfifo_over_2048 = (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel = (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
+ dma_cap->number_tx_channel = (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
+ /* Alternate (enhanced) DESC mode */
+ dma_cap->enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
+
+ return 0;
+}
+
+static void dwmac1000_rx_watchdog(void __iomem *ioaddr, u32 riwt,
+ u32 queue)
+{
+ writel(riwt, ioaddr + DMA_RX_WATCHDOG);
+}
+
+const struct stmmac_dma_ops dwmac1000_dma_ops = {
+ .reset = dwmac_dma_reset,
+ .init = dwmac1000_dma_init,
+ .init_rx_chan = dwmac1000_dma_init_rx,
+ .init_tx_chan = dwmac1000_dma_init_tx,
+ .axi = dwmac1000_dma_axi,
+ .dump_regs = dwmac1000_dump_dma_regs,
+ .dma_rx_mode = dwmac1000_dma_operation_mode_rx,
+ .dma_tx_mode = dwmac1000_dma_operation_mode_tx,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+ .get_hw_feature = dwmac1000_get_hw_feature,
+ .rx_watchdog = dwmac1000_rx_watchdog,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
new file mode 100644
index 000000000..a6e8d7bd9
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This only implements the mac core functions for this chip.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/crc32.h>
+#include <asm/io.h>
+#include "stmmac.h"
+#include "dwmac100.h"
+
+static void dwmac100_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ value |= MAC_CORE_INIT;
+
+ writel(value, ioaddr + MAC_CONTROL);
+
+#ifdef STMMAC_VLAN_TAG_USED
+ writel(ETH_P_8021Q, ioaddr + MAC_VLAN1);
+#endif
+}
+
+static void dwmac100_dump_mac_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ reg_space[MAC_CONTROL / 4] = readl(ioaddr + MAC_CONTROL);
+ reg_space[MAC_ADDR_HIGH / 4] = readl(ioaddr + MAC_ADDR_HIGH);
+ reg_space[MAC_ADDR_LOW / 4] = readl(ioaddr + MAC_ADDR_LOW);
+ reg_space[MAC_HASH_HIGH / 4] = readl(ioaddr + MAC_HASH_HIGH);
+ reg_space[MAC_HASH_LOW / 4] = readl(ioaddr + MAC_HASH_LOW);
+ reg_space[MAC_FLOW_CTRL / 4] = readl(ioaddr + MAC_FLOW_CTRL);
+ reg_space[MAC_VLAN1 / 4] = readl(ioaddr + MAC_VLAN1);
+ reg_space[MAC_VLAN2 / 4] = readl(ioaddr + MAC_VLAN2);
+}
+
+static int dwmac100_rx_ipc_enable(struct mac_device_info *hw)
+{
+ return 0;
+}
+
+static int dwmac100_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ return 0;
+}
+
+static void dwmac100_set_umac_addr(struct mac_device_info *hw,
+ const unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
+}
+
+static void dwmac100_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= MAC_CONTROL_PR;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_IF | MAC_CONTROL_HO |
+ MAC_CONTROL_HP);
+ } else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
+ || (dev->flags & IFF_ALLMULTI)) {
+ value |= MAC_CONTROL_PM;
+ value &= ~(MAC_CONTROL_PR | MAC_CONTROL_IF | MAC_CONTROL_HO);
+ writel(0xffffffff, ioaddr + MAC_HASH_HIGH);
+ writel(0xffffffff, ioaddr + MAC_HASH_LOW);
+ } else if (netdev_mc_empty(dev)) { /* no multicast */
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR | MAC_CONTROL_IF |
+ MAC_CONTROL_HO | MAC_CONTROL_HP);
+ } else {
+ u32 mc_filter[2];
+ struct netdev_hw_addr *ha;
+
+ /* Perfect filter mode for physical address and Hash
+ * filter for multicast
+ */
+ value |= MAC_CONTROL_HP;
+ value &= ~(MAC_CONTROL_PM | MAC_CONTROL_PR |
+ MAC_CONTROL_IF | MAC_CONTROL_HO);
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper 6 bits of the calculated CRC are used to
+			 * index the contents of the hash table
+ */
+ int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
+ */
+ mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
+ }
+ writel(mc_filter[0], ioaddr + MAC_HASH_LOW);
+ writel(mc_filter[1], ioaddr + MAC_HASH_HIGH);
+ }
+
+ writel(value, ioaddr + MAC_CONTROL);
+}
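+
+/* Example (illustrative): with the 64-bin table above, bit_nr = crc >> 26
+ * lies in 0..63; its top bit selects MAC_HASH_HIGH vs MAC_HASH_LOW and
+ * the low five bits select the bit within that 32-bit register.
+ */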
+
+static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int flow = MAC_FLOW_CTRL_ENABLE;
+
+ if (duplex)
+ flow |= (pause_time << MAC_FLOW_CTRL_PT_SHIFT);
+ writel(flow, ioaddr + MAC_FLOW_CTRL);
+}
+
+/* No PMT module supported on ST boards with this Eth chip. */
+static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+}
+
+static void dwmac100_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + MAC_CONTROL);
+
+ if (enable)
+ value |= MAC_CONTROL_OM;
+ else
+ value &= ~MAC_CONTROL_OM;
+
+ writel(value, ioaddr + MAC_CONTROL);
+}
+
+const struct stmmac_ops dwmac100_ops = {
+ .core_init = dwmac100_core_init,
+ .set_mac = stmmac_set_mac,
+ .rx_ipc = dwmac100_rx_ipc_enable,
+ .dump_regs = dwmac100_dump_mac_regs,
+ .host_irq_status = dwmac100_irq_status,
+ .set_filter = dwmac100_set_filter,
+ .flow_ctrl = dwmac100_flow_ctrl,
+ .pmt = dwmac100_pmt,
+ .set_umac_addr = dwmac100_set_umac_addr,
+ .get_umac_addr = dwmac100_get_umac_addr,
+ .set_mac_loopback = dwmac100_set_mac_loopback,
+};
+
+int dwmac100_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tDWMAC100\n");
+
+ mac->pcsr = priv->ioaddr;
+ mac->link.duplex = MAC_CONTROL_F;
+ mac->link.speed10 = 0;
+ mac->link.speed100 = 0;
+ mac->link.speed1000 = 0;
+ mac->link.speed_mask = MAC_CONTROL_PS;
+ mac->mii.addr = MAC_MII_ADDR;
+ mac->mii.data = MAC_MII_DATA;
+ mac->mii.addr_shift = 11;
+ mac->mii.addr_mask = 0x0000F800;
+ mac->mii.reg_shift = 6;
+ mac->mii.reg_mask = 0x000007C0;
+ mac->mii.clk_csr_shift = 2;
+ mac->mii.clk_csr_mask = GENMASK(5, 2);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
new file mode 100644
index 000000000..8f0d9bc7c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the MAC 10/100 on-chip Ethernet controller
+ currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
+
+ DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
+ this code.
+
+ This contains the functions to handle the dma.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <asm/io.h>
+#include "dwmac100.h"
+#include "dwmac_dma.h"
+
+static void dwmac100_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ /* Enable Application Access by writing to DMA CSR0 */
+ writel(DMA_BUS_MODE_DEFAULT | (dma_cfg->pbl << DMA_BUS_MODE_PBL_SHIFT),
+ ioaddr + DMA_BUS_MODE);
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
+}
+
+static void dwmac100_dma_init_rx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_rx_phy, u32 chan)
+{
+ /* RX descriptor base addr lists must be written into DMA CSR3 */
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_RCV_BASE_ADDR);
+}
+
+static void dwmac100_dma_init_tx(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_tx_phy, u32 chan)
+{
+ /* TX descriptor base addr lists must be written into DMA CSR4 */
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_TX_BASE_ADDR);
+}
+
+/* Store and Forward capability is not used at all.
+ *
+ * The transmit threshold can be programmed by setting the TTC bits in the DMA
+ * control register.
+ */
+static void dwmac100_dma_operation_mode_tx(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ if (mode <= 32)
+ csr6 |= DMA_CONTROL_TTC_32;
+ else if (mode <= 64)
+ csr6 |= DMA_CONTROL_TTC_64;
+ else
+ csr6 |= DMA_CONTROL_TTC_128;
+
+ writel(csr6, ioaddr + DMA_CONTROL);
+}
+
+static void dwmac100_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = 0; i < NUM_DWMAC100_DMA_REGS; i++)
+ reg_space[DMA_BUS_MODE / 4 + i] =
+ readl(ioaddr + DMA_BUS_MODE + i * 4);
+
+ reg_space[DMA_CUR_TX_BUF_ADDR / 4] =
+ readl(ioaddr + DMA_CUR_TX_BUF_ADDR);
+ reg_space[DMA_CUR_RX_BUF_ADDR / 4] =
+ readl(ioaddr + DMA_CUR_RX_BUF_ADDR);
+}
+
+/* The DMA controller has two counters to track the number of missed frames. */
+static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
+
+ if (unlikely(csr8)) {
+ if (csr8 & DMA_MISSED_FRAME_OVE) {
+ stats->rx_over_errors += 0x800;
+ x->rx_overflow_cntr += 0x800;
+ } else {
+ unsigned int ove_cntr;
+ ove_cntr = ((csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17);
+ stats->rx_over_errors += ove_cntr;
+ x->rx_overflow_cntr += ove_cntr;
+ }
+
+ if (csr8 & DMA_MISSED_FRAME_OVE_M) {
+ stats->rx_missed_errors += 0xffff;
+ x->rx_missed_cntr += 0xffff;
+ } else {
+ unsigned int miss_f = (csr8 & DMA_MISSED_FRAME_M_CNTR);
+ stats->rx_missed_errors += miss_f;
+ x->rx_missed_cntr += miss_f;
+ }
+ }
+}
+
+const struct stmmac_dma_ops dwmac100_dma_ops = {
+ .reset = dwmac_dma_reset,
+ .init = dwmac100_dma_init,
+ .init_rx_chan = dwmac100_dma_init_rx,
+ .init_tx_chan = dwmac100_dma_init_tx,
+ .dump_regs = dwmac100_dump_dma_regs,
+ .dma_tx_mode = dwmac100_dma_operation_mode_tx,
+ .dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
+ .enable_dma_transmission = dwmac_enable_dma_transmission,
+ .enable_dma_irq = dwmac_enable_dma_irq,
+ .disable_dma_irq = dwmac_disable_dma_irq,
+ .start_tx = dwmac_dma_start_tx,
+ .stop_tx = dwmac_dma_stop_tx,
+ .start_rx = dwmac_dma_start_rx,
+ .stop_rx = dwmac_dma_stop_rx,
+ .dma_interrupt = dwmac_dma_interrupt,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
new file mode 100644
index 000000000..12c0e6080
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4.h
@@ -0,0 +1,520 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DWMAC4 Header file.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_H__
+#define __DWMAC4_H__
+
+#include "common.h"
+
+/* MAC registers */
+#define GMAC_CONFIG 0x00000000
+#define GMAC_EXT_CONFIG 0x00000004
+#define GMAC_PACKET_FILTER 0x00000008
+#define GMAC_HASH_TAB(x) (0x10 + (x) * 4)
+#define GMAC_VLAN_TAG 0x00000050
+#define GMAC_VLAN_TAG_DATA 0x00000054
+#define GMAC_VLAN_HASH_TABLE 0x00000058
+#define GMAC_RX_FLOW_CTRL 0x00000090
+#define GMAC_VLAN_INCL 0x00000060
+#define GMAC_QX_TX_FLOW_CTRL(x) (0x70 + x * 4)
+#define GMAC_TXQ_PRTY_MAP0 0x98
+#define GMAC_TXQ_PRTY_MAP1 0x9C
+#define GMAC_RXQ_CTRL0 0x000000a0
+#define GMAC_RXQ_CTRL1 0x000000a4
+#define GMAC_RXQ_CTRL2 0x000000a8
+#define GMAC_RXQ_CTRL3 0x000000ac
+#define GMAC_INT_STATUS 0x000000b0
+#define GMAC_INT_EN 0x000000b4
+#define GMAC_1US_TIC_COUNTER 0x000000dc
+#define GMAC_PCS_BASE 0x000000e0
+#define GMAC_PHYIF_CONTROL_STATUS 0x000000f8
+#define GMAC_PMT 0x000000c0
+#define GMAC_DEBUG 0x00000114
+#define GMAC_HW_FEATURE0 0x0000011c
+#define GMAC_HW_FEATURE1 0x00000120
+#define GMAC_HW_FEATURE2 0x00000124
+#define GMAC_HW_FEATURE3 0x00000128
+#define GMAC_MDIO_ADDR 0x00000200
+#define GMAC_MDIO_DATA 0x00000204
+#define GMAC_GPIO_STATUS 0x0000020C
+#define GMAC_ARP_ADDR 0x00000210
+#define GMAC_ADDR_HIGH(reg) (0x300 + reg * 8)
+#define GMAC_ADDR_LOW(reg) (0x304 + reg * 8)
+#define GMAC_L3L4_CTRL(reg) (0x900 + (reg) * 0x30)
+#define GMAC_L4_ADDR(reg) (0x904 + (reg) * 0x30)
+#define GMAC_L3_ADDR0(reg) (0x910 + (reg) * 0x30)
+#define GMAC_L3_ADDR1(reg) (0x914 + (reg) * 0x30)
+#define GMAC_TIMESTAMP_STATUS 0x00000b20
+
+/* RX Queues Routing */
+#define GMAC_RXQCTRL_AVCPQ_MASK GENMASK(2, 0)
+#define GMAC_RXQCTRL_AVCPQ_SHIFT 0
+#define GMAC_RXQCTRL_PTPQ_MASK GENMASK(6, 4)
+#define GMAC_RXQCTRL_PTPQ_SHIFT 4
+#define GMAC_RXQCTRL_DCBCPQ_MASK GENMASK(10, 8)
+#define GMAC_RXQCTRL_DCBCPQ_SHIFT 8
+#define GMAC_RXQCTRL_UPQ_MASK GENMASK(14, 12)
+#define GMAC_RXQCTRL_UPQ_SHIFT 12
+#define GMAC_RXQCTRL_MCBCQ_MASK GENMASK(18, 16)
+#define GMAC_RXQCTRL_MCBCQ_SHIFT 16
+#define GMAC_RXQCTRL_MCBCQEN BIT(20)
+#define GMAC_RXQCTRL_MCBCQEN_SHIFT 20
+#define GMAC_RXQCTRL_TACPQE BIT(21)
+#define GMAC_RXQCTRL_TACPQE_SHIFT 21
+#define GMAC_RXQCTRL_FPRQ GENMASK(26, 24)
+#define GMAC_RXQCTRL_FPRQ_SHIFT 24
+
+/* MAC Packet Filtering */
+#define GMAC_PACKET_FILTER_PR BIT(0)
+#define GMAC_PACKET_FILTER_HMC BIT(2)
+#define GMAC_PACKET_FILTER_PM BIT(4)
+#define GMAC_PACKET_FILTER_PCF BIT(7)
+#define GMAC_PACKET_FILTER_HPF BIT(10)
+#define GMAC_PACKET_FILTER_VTFE BIT(16)
+#define GMAC_PACKET_FILTER_IPFE BIT(20)
+#define GMAC_PACKET_FILTER_RA BIT(31)
+
+#define GMAC_MAX_PERFECT_ADDRESSES 128
+
+/* MAC VLAN */
+#define GMAC_VLAN_EDVLP BIT(26)
+#define GMAC_VLAN_VTHM BIT(25)
+#define GMAC_VLAN_DOVLTC BIT(20)
+#define GMAC_VLAN_ESVL BIT(18)
+#define GMAC_VLAN_ETV BIT(16)
+#define GMAC_VLAN_VID GENMASK(15, 0)
+#define GMAC_VLAN_VLTI BIT(20)
+#define GMAC_VLAN_CSVL BIT(19)
+#define GMAC_VLAN_VLC GENMASK(17, 16)
+#define GMAC_VLAN_VLC_SHIFT 16
+#define GMAC_VLAN_VLHT GENMASK(15, 0)
+
+/* MAC VLAN Tag */
+#define GMAC_VLAN_TAG_VID GENMASK(15, 0)
+#define GMAC_VLAN_TAG_ETV BIT(16)
+
+/* MAC VLAN Tag Control */
+#define GMAC_VLAN_TAG_CTRL_OB BIT(0)
+#define GMAC_VLAN_TAG_CTRL_CT BIT(1)
+#define GMAC_VLAN_TAG_CTRL_OFS_MASK GENMASK(6, 2)
+#define GMAC_VLAN_TAG_CTRL_OFS_SHIFT 2
+#define GMAC_VLAN_TAG_CTRL_EVLS_MASK GENMASK(22, 21)
+#define GMAC_VLAN_TAG_CTRL_EVLS_SHIFT 21
+#define GMAC_VLAN_TAG_CTRL_EVLRXS BIT(24)
+
+#define GMAC_VLAN_TAG_STRIP_NONE (0x0 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_PASS (0x1 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_FAIL (0x2 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+#define GMAC_VLAN_TAG_STRIP_ALL (0x3 << GMAC_VLAN_TAG_CTRL_EVLS_SHIFT)
+
+/* MAC VLAN Tag Data/Filter */
+#define GMAC_VLAN_TAG_DATA_VID GENMASK(15, 0)
+#define GMAC_VLAN_TAG_DATA_VEN BIT(16)
+#define GMAC_VLAN_TAG_DATA_ETV BIT(17)
+
+/* MAC RX Queue Enable */
+#define GMAC_RX_QUEUE_CLEAR(queue) ~(GENMASK(1, 0) << ((queue) * 2))
+#define GMAC_RX_AV_QUEUE_ENABLE(queue) BIT((queue) * 2)
+#define GMAC_RX_DCB_QUEUE_ENABLE(queue) BIT(((queue) * 2) + 1)
+
+/* MAC Flow Control RX */
+#define GMAC_RX_FLOW_CTRL_RFE BIT(0)
+
+/* RX Queues Priorities */
+#define GMAC_RXQCTRL_PSRQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_RXQCTRL_PSRQX_SHIFT(x) ((x) * 8)
+
+/* TX Queues Priorities */
+#define GMAC_TXQCTRL_PSTQX_MASK(x) GENMASK(7 + ((x) * 8), 0 + ((x) * 8))
+#define GMAC_TXQCTRL_PSTQX_SHIFT(x) ((x) * 8)
+
+/* MAC Flow Control TX */
+#define GMAC_TX_FLOW_CTRL_TFE BIT(1)
+#define GMAC_TX_FLOW_CTRL_PT_SHIFT 16
+
+/* MAC Interrupt bitmap */
+#define GMAC_INT_RGSMIIS BIT(0)
+#define GMAC_INT_PCS_LINK BIT(1)
+#define GMAC_INT_PCS_ANE BIT(2)
+#define GMAC_INT_PCS_PHYIS BIT(3)
+#define GMAC_INT_PMT_EN BIT(4)
+#define GMAC_INT_LPI_EN BIT(5)
+#define GMAC_INT_TSIE BIT(12)
+
+#define GMAC_PCS_IRQ_DEFAULT (GMAC_INT_RGSMIIS | GMAC_INT_PCS_LINK | \
+ GMAC_INT_PCS_ANE)
+
+#define GMAC_INT_DEFAULT_ENABLE (GMAC_INT_PMT_EN | GMAC_INT_LPI_EN | \
+ GMAC_INT_TSIE)
+
+enum dwmac4_irq_status {
+ time_stamp_irq = 0x00001000,
+ mmc_rx_csum_offload_irq = 0x00000800,
+ mmc_tx_irq = 0x00000400,
+ mmc_rx_irq = 0x00000200,
+ mmc_irq = 0x00000100,
+ lpi_irq = 0x00000020,
+ pmt_irq = 0x00000010,
+};
+
+/* MAC PMT bitmap */
+enum power_event {
+ pointer_reset = 0x80000000,
+ global_unicast = 0x00000200,
+ wake_up_rx_frame = 0x00000040,
+ magic_frame = 0x00000020,
+ wake_up_frame_en = 0x00000004,
+ magic_pkt_en = 0x00000002,
+ power_down = 0x00000001,
+};
+
+/* Energy Efficient Ethernet (EEE) for GMAC4
+ *
+ * LPI status, timer and control register offset
+ */
+#define GMAC4_LPI_CTRL_STATUS 0xd0
+#define GMAC4_LPI_TIMER_CTRL 0xd4
+#define GMAC4_LPI_ENTRY_TIMER 0xd8
+#define GMAC4_MAC_ONEUS_TIC_COUNTER 0xdc
+
+/* LPI control and status defines */
+#define GMAC4_LPI_CTRL_STATUS_LPITCSE BIT(21) /* LPI Tx Clock Stop Enable */
+#define GMAC4_LPI_CTRL_STATUS_LPIATE BIT(20) /* LPI Timer Enable */
+#define GMAC4_LPI_CTRL_STATUS_LPITXA BIT(19) /* Enable LPI TX Automate */
+#define GMAC4_LPI_CTRL_STATUS_PLS BIT(17) /* PHY Link Status */
+#define GMAC4_LPI_CTRL_STATUS_LPIEN BIT(16) /* LPI Enable */
+#define GMAC4_LPI_CTRL_STATUS_RLPIEX BIT(3) /* Receive LPI Exit */
+#define GMAC4_LPI_CTRL_STATUS_RLPIEN BIT(2) /* Receive LPI Entry */
+#define GMAC4_LPI_CTRL_STATUS_TLPIEX BIT(1) /* Transmit LPI Exit */
+#define GMAC4_LPI_CTRL_STATUS_TLPIEN BIT(0) /* Transmit LPI Entry */
+
+/* MAC Debug bitmap */
+#define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17)
+#define GMAC_DEBUG_TFCSTS_SHIFT 17
+#define GMAC_DEBUG_TFCSTS_IDLE 0
+#define GMAC_DEBUG_TFCSTS_WAIT 1
+#define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2
+#define GMAC_DEBUG_TFCSTS_XFER 3
+#define GMAC_DEBUG_TPESTS BIT(16)
+#define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1)
+#define GMAC_DEBUG_RFCFCSTS_SHIFT 1
+#define GMAC_DEBUG_RPESTS BIT(0)
+
+/* MAC config */
+#define GMAC_CONFIG_ARPEN BIT(31)
+#define GMAC_CONFIG_SARC GENMASK(30, 28)
+#define GMAC_CONFIG_SARC_SHIFT 28
+#define GMAC_CONFIG_IPC BIT(27)
+#define GMAC_CONFIG_IPG GENMASK(26, 24)
+#define GMAC_CONFIG_IPG_SHIFT 24
+#define GMAC_CONFIG_2K BIT(22)
+#define GMAC_CONFIG_ACS BIT(20)
+#define GMAC_CONFIG_BE BIT(18)
+#define GMAC_CONFIG_JD BIT(17)
+#define GMAC_CONFIG_JE BIT(16)
+#define GMAC_CONFIG_PS BIT(15)
+#define GMAC_CONFIG_FES BIT(14)
+#define GMAC_CONFIG_FES_SHIFT 14
+#define GMAC_CONFIG_DM BIT(13)
+#define GMAC_CONFIG_LM BIT(12)
+#define GMAC_CONFIG_DCRS BIT(9)
+#define GMAC_CONFIG_TE BIT(1)
+#define GMAC_CONFIG_RE BIT(0)
+
+/* MAC extended config */
+#define GMAC_CONFIG_EIPG GENMASK(29, 25)
+#define GMAC_CONFIG_EIPG_SHIFT 25
+#define GMAC_CONFIG_EIPG_EN BIT(24)
+#define GMAC_CONFIG_HDSMS GENMASK(22, 20)
+#define GMAC_CONFIG_HDSMS_SHIFT 20
+#define GMAC_CONFIG_HDSMS_256 (0x2 << GMAC_CONFIG_HDSMS_SHIFT)
+
+/* MAC HW features0 bitmap */
+#define GMAC_HW_FEAT_SAVLANINS BIT(27)
+#define GMAC_HW_FEAT_ADDMAC BIT(18)
+#define GMAC_HW_FEAT_RXCOESEL BIT(16)
+#define GMAC_HW_FEAT_TXCOSEL BIT(14)
+#define GMAC_HW_FEAT_EEESEL BIT(13)
+#define GMAC_HW_FEAT_TSSEL BIT(12)
+#define GMAC_HW_FEAT_ARPOFFSEL BIT(9)
+#define GMAC_HW_FEAT_MMCSEL BIT(8)
+#define GMAC_HW_FEAT_MGKSEL BIT(7)
+#define GMAC_HW_FEAT_RWKSEL BIT(6)
+#define GMAC_HW_FEAT_SMASEL BIT(5)
+#define GMAC_HW_FEAT_VLHASH BIT(4)
+#define GMAC_HW_FEAT_PCSSEL BIT(3)
+#define GMAC_HW_FEAT_HDSEL BIT(2)
+#define GMAC_HW_FEAT_GMIISEL BIT(1)
+#define GMAC_HW_FEAT_MIISEL BIT(0)
+
+/* MAC HW features1 bitmap */
+#define GMAC_HW_FEAT_L3L4FNUM GENMASK(30, 27)
+#define GMAC_HW_HASH_TB_SZ GENMASK(25, 24)
+#define GMAC_HW_FEAT_AVSEL BIT(20)
+#define GMAC_HW_TSOEN BIT(18)
+#define GMAC_HW_FEAT_SPHEN BIT(17)
+#define GMAC_HW_ADDR64 GENMASK(15, 14)
+#define GMAC_HW_TXFIFOSIZE GENMASK(10, 6)
+#define GMAC_HW_RXFIFOSIZE GENMASK(4, 0)
+
+/* MAC HW features2 bitmap */
+#define GMAC_HW_FEAT_AUXSNAPNUM GENMASK(30, 28)
+#define GMAC_HW_FEAT_PPSOUTNUM GENMASK(26, 24)
+#define GMAC_HW_FEAT_TXCHCNT GENMASK(21, 18)
+#define GMAC_HW_FEAT_RXCHCNT GENMASK(15, 12)
+#define GMAC_HW_FEAT_TXQCNT GENMASK(9, 6)
+#define GMAC_HW_FEAT_RXQCNT GENMASK(3, 0)
+
+/* MAC HW features3 bitmap */
+#define GMAC_HW_FEAT_ASP GENMASK(29, 28)
+#define GMAC_HW_FEAT_TBSSEL BIT(27)
+#define GMAC_HW_FEAT_FPESEL BIT(26)
+#define GMAC_HW_FEAT_ESTWID GENMASK(21, 20)
+#define GMAC_HW_FEAT_ESTDEP GENMASK(19, 17)
+#define GMAC_HW_FEAT_ESTSEL BIT(16)
+#define GMAC_HW_FEAT_FRPES GENMASK(14, 13)
+#define GMAC_HW_FEAT_FRPBS GENMASK(12, 11)
+#define GMAC_HW_FEAT_FRPSEL BIT(10)
+#define GMAC_HW_FEAT_DVLAN BIT(5)
+#define GMAC_HW_FEAT_NRVF GENMASK(2, 0)
+
+/* GMAC GPIO Status reg */
+#define GMAC_GPO0 BIT(16)
+#define GMAC_GPO1 BIT(17)
+#define GMAC_GPO2 BIT(18)
+#define GMAC_GPO3 BIT(19)
+
+/* MAC HW ADDR regs */
+#define GMAC_HI_DCS GENMASK(18, 16)
+#define GMAC_HI_DCS_SHIFT 16
+#define GMAC_HI_REG_AE BIT(31)
+
+/* L3/L4 Filters regs */
+#define GMAC_L4DPIM0 BIT(21)
+#define GMAC_L4DPM0 BIT(20)
+#define GMAC_L4SPIM0 BIT(19)
+#define GMAC_L4SPM0 BIT(18)
+#define GMAC_L4PEN0 BIT(16)
+#define GMAC_L3DAIM0 BIT(5)
+#define GMAC_L3DAM0 BIT(4)
+#define GMAC_L3SAIM0 BIT(3)
+#define GMAC_L3SAM0 BIT(2)
+#define GMAC_L3PEN0 BIT(0)
+#define GMAC_L4DP0 GENMASK(31, 16)
+#define GMAC_L4DP0_SHIFT 16
+#define GMAC_L4SP0 GENMASK(15, 0)
+
+/* MAC Timestamp Status */
+#define GMAC_TIMESTAMP_AUXTSTRIG BIT(2)
+#define GMAC_TIMESTAMP_ATSNS_MASK GENMASK(29, 25)
+#define GMAC_TIMESTAMP_ATSNS_SHIFT 25
+
+/* MTL registers */
+#define MTL_OPERATION_MODE 0x00000c00
+#define MTL_FRPE BIT(15)
+#define MTL_OPERATION_SCHALG_MASK GENMASK(6, 5)
+#define MTL_OPERATION_SCHALG_WRR (0x0 << 5)
+#define MTL_OPERATION_SCHALG_WFQ (0x1 << 5)
+#define MTL_OPERATION_SCHALG_DWRR (0x2 << 5)
+#define MTL_OPERATION_SCHALG_SP (0x3 << 5)
+#define MTL_OPERATION_RAA BIT(2)
+#define MTL_OPERATION_RAA_SP (0x0 << 2)
+#define MTL_OPERATION_RAA_WSP (0x1 << 2)
+
+#define MTL_INT_STATUS 0x00000c20
+#define MTL_INT_QX(x) BIT(x)
+
+#define MTL_RXQ_DMA_MAP0 0x00000c30 /* queue 0 to 3 */
+#define MTL_RXQ_DMA_MAP1 0x00000c34 /* queue 4 to 7 */
+#define MTL_RXQ_DMA_Q04MDMACH_MASK GENMASK(3, 0)
+#define MTL_RXQ_DMA_Q04MDMACH(x) ((x) << 0)
+#define MTL_RXQ_DMA_QXMDMACH_MASK(x) GENMASK(11 + (8 * ((x) - 1)), 8 * (x))
+#define MTL_RXQ_DMA_QXMDMACH(chan, q) ((chan) << (8 * (q)))
+
+#define MTL_CHAN_BASE_ADDR 0x00000d00
+#define MTL_CHAN_BASE_OFFSET 0x40
+#define MTL_CHANX_BASE_ADDR(x) (MTL_CHAN_BASE_ADDR + \
+ (x * MTL_CHAN_BASE_OFFSET))
+
+#define MTL_CHAN_TX_OP_MODE(x) MTL_CHANX_BASE_ADDR(x)
+#define MTL_CHAN_TX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x8)
+#define MTL_CHAN_INT_CTRL(x) (MTL_CHANX_BASE_ADDR(x) + 0x2c)
+#define MTL_CHAN_RX_OP_MODE(x) (MTL_CHANX_BASE_ADDR(x) + 0x30)
+#define MTL_CHAN_RX_DEBUG(x) (MTL_CHANX_BASE_ADDR(x) + 0x38)
+
+#define MTL_OP_MODE_RSF BIT(5)
+#define MTL_OP_MODE_TXQEN_MASK GENMASK(3, 2)
+#define MTL_OP_MODE_TXQEN_AV BIT(2)
+#define MTL_OP_MODE_TXQEN BIT(3)
+#define MTL_OP_MODE_TSF BIT(1)
+
+#define MTL_OP_MODE_TQS_MASK GENMASK(24, 16)
+#define MTL_OP_MODE_TQS_SHIFT 16
+
+#define MTL_OP_MODE_TTC_MASK 0x70
+#define MTL_OP_MODE_TTC_SHIFT 4
+
+#define MTL_OP_MODE_TTC_32 0
+#define MTL_OP_MODE_TTC_64 (1 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_96 (2 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_128 (3 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_192 (4 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_256 (5 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_384 (6 << MTL_OP_MODE_TTC_SHIFT)
+#define MTL_OP_MODE_TTC_512 (7 << MTL_OP_MODE_TTC_SHIFT)
+
+#define MTL_OP_MODE_RQS_MASK GENMASK(29, 20)
+#define MTL_OP_MODE_RQS_SHIFT 20
+
+#define MTL_OP_MODE_RFD_MASK GENMASK(19, 14)
+#define MTL_OP_MODE_RFD_SHIFT 14
+
+#define MTL_OP_MODE_RFA_MASK GENMASK(13, 8)
+#define MTL_OP_MODE_RFA_SHIFT 8
+
+#define MTL_OP_MODE_EHFC BIT(7)
+
+#define MTL_OP_MODE_RTC_MASK 0x18
+#define MTL_OP_MODE_RTC_SHIFT 3
+
+#define MTL_OP_MODE_RTC_32 (1 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_64 0
+#define MTL_OP_MODE_RTC_96 (2 << MTL_OP_MODE_RTC_SHIFT)
+#define MTL_OP_MODE_RTC_128 (3 << MTL_OP_MODE_RTC_SHIFT)
+
+/* MTL ETS Control register */
+#define MTL_ETS_CTRL_BASE_ADDR 0x00000d10
+#define MTL_ETS_CTRL_BASE_OFFSET 0x40
+#define MTL_ETSX_CTRL_BASE_ADDR(x) (MTL_ETS_CTRL_BASE_ADDR + \
+ ((x) * MTL_ETS_CTRL_BASE_OFFSET))
+
+#define MTL_ETS_CTRL_CC BIT(3)
+#define MTL_ETS_CTRL_AVALG BIT(2)
+
+/* MTL Queue Quantum Weight */
+#define MTL_TXQ_WEIGHT_BASE_ADDR 0x00000d18
+#define MTL_TXQ_WEIGHT_BASE_OFFSET 0x40
+#define MTL_TXQX_WEIGHT_BASE_ADDR(x) (MTL_TXQ_WEIGHT_BASE_ADDR + \
+ ((x) * MTL_TXQ_WEIGHT_BASE_OFFSET))
+#define MTL_TXQ_WEIGHT_ISCQW_MASK GENMASK(20, 0)
+
+/* MTL sendSlopeCredit register */
+#define MTL_SEND_SLP_CRED_BASE_ADDR 0x00000d1c
+#define MTL_SEND_SLP_CRED_OFFSET 0x40
+#define MTL_SEND_SLP_CREDX_BASE_ADDR(x) (MTL_SEND_SLP_CRED_BASE_ADDR + \
+ ((x) * MTL_SEND_SLP_CRED_OFFSET))
+
+#define MTL_SEND_SLP_CRED_SSC_MASK GENMASK(13, 0)
+
+/* MTL hiCredit register */
+#define MTL_HIGH_CRED_BASE_ADDR 0x00000d20
+#define MTL_HIGH_CRED_OFFSET 0x40
+#define MTL_HIGH_CREDX_BASE_ADDR(x) (MTL_HIGH_CRED_BASE_ADDR + \
+ ((x) * MTL_HIGH_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_HC_MASK GENMASK(28, 0)
+
+/* MTL loCredit register */
+#define MTL_LOW_CRED_BASE_ADDR 0x00000d24
+#define MTL_LOW_CRED_OFFSET 0x40
+#define MTL_LOW_CREDX_BASE_ADDR(x) (MTL_LOW_CRED_BASE_ADDR + \
+ ((x) * MTL_LOW_CRED_OFFSET))
+
+#define MTL_HIGH_CRED_LC_MASK GENMASK(28, 0)
+
+/* MTL debug */
+#define MTL_DEBUG_TXSTSFSTS BIT(5)
+#define MTL_DEBUG_TXFSTS BIT(4)
+#define MTL_DEBUG_TWCSTS BIT(3)
+
+/* MTL debug: Tx FIFO Read Controller Status */
+#define MTL_DEBUG_TRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_TRCSTS_SHIFT 1
+#define MTL_DEBUG_TRCSTS_IDLE 0
+#define MTL_DEBUG_TRCSTS_READ 1
+#define MTL_DEBUG_TRCSTS_TXW 2
+#define MTL_DEBUG_TRCSTS_WRITE 3
+#define MTL_DEBUG_TXPAUSED BIT(0)
+
+/* MTL debug: Rx FIFO fill level and read controller status */
+#define MTL_DEBUG_RXFSTS_MASK GENMASK(5, 4)
+#define MTL_DEBUG_RXFSTS_SHIFT 4
+#define MTL_DEBUG_RXFSTS_EMPTY 0
+#define MTL_DEBUG_RXFSTS_BT 1
+#define MTL_DEBUG_RXFSTS_AT 2
+#define MTL_DEBUG_RXFSTS_FULL 3
+#define MTL_DEBUG_RRCSTS_MASK GENMASK(2, 1)
+#define MTL_DEBUG_RRCSTS_SHIFT 1
+#define MTL_DEBUG_RRCSTS_IDLE 0
+#define MTL_DEBUG_RRCSTS_RDATA 1
+#define MTL_DEBUG_RRCSTS_RSTAT 2
+#define MTL_DEBUG_RRCSTS_FLUSH 3
+#define MTL_DEBUG_RWCSTS BIT(0)
+
+/* MTL interrupt */
+#define MTL_RX_OVERFLOW_INT_EN BIT(24)
+#define MTL_RX_OVERFLOW_INT BIT(16)
+
+/* Default operating mode of the MAC */
+#define GMAC_CORE_INIT (GMAC_CONFIG_JD | GMAC_CONFIG_PS | \
+ GMAC_CONFIG_BE | GMAC_CONFIG_DCRS | \
+ GMAC_CONFIG_JE)
+
+/* To dump the core regs excluding the Address Registers */
+#define GMAC_REG_NUM 132
+
+/* SGMII/RGMII status register */
+#define GMAC_PHYIF_CTRLSTATUS_TC BIT(0)
+#define GMAC_PHYIF_CTRLSTATUS_LUD BIT(1)
+#define GMAC_PHYIF_CTRLSTATUS_SMIDRXS BIT(4)
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD BIT(16)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED GENMASK(18, 17)
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT 17
+#define GMAC_PHYIF_CTRLSTATUS_LNKSTS BIT(19)
+#define GMAC_PHYIF_CTRLSTATUS_JABTO BIT(20)
+#define GMAC_PHYIF_CTRLSTATUS_FALSECARDET BIT(21)
+/* LNKMOD */
+#define GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK 0x1
+/* LNKSPEED */
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_125 0x2
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_25 0x1
+#define GMAC_PHYIF_CTRLSTATUS_SPEED_2_5 0x0
+
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+#endif /* __DWMAC4_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
new file mode 100644
index 000000000..84276eb68
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -0,0 +1,1334 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.00 has been used for developing this code.
+ *
+ * This only implements the mac core functions for this chip.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/crc32.h>
+#include <linux/slab.h>
+#include <linux/ethtool.h>
+#include <linux/io.h>
+#include "stmmac.h"
+#include "stmmac_pcs.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+
+static void dwmac4_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+ u32 clk_rate;
+
+ value |= GMAC_CORE_INIT;
+
+ if (hw->ps) {
+ value |= GMAC_CONFIG_TE;
+
+		value &= ~hw->link.speed_mask;
+ switch (hw->ps) {
+ case SPEED_1000:
+ value |= hw->link.speed1000;
+ break;
+ case SPEED_100:
+ value |= hw->link.speed100;
+ break;
+ case SPEED_10:
+ value |= hw->link.speed10;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
+ /* Configure LPI 1us counter to number of CSR clock ticks in 1us - 1 */
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+ writel((clk_rate / 1000000) - 1, ioaddr + GMAC4_MAC_ONEUS_TIC_COUNTER);
+
+ /* Enable GMAC interrupts */
+ value = GMAC_INT_DEFAULT_ENABLE;
+
+ if (hw->pcs)
+ value |= GMAC_PCS_IRQ_DEFAULT;
+
+ /* Enable FPE interrupt */
+ if ((GMAC_HW_FEAT_FPESEL & readl(ioaddr + GMAC_HW_FEATURE3)) >> 26)
+ value |= GMAC_INT_FPE_EN;
+
+ writel(value, ioaddr + GMAC_INT_EN);
+
+ if (GMAC_INT_DEFAULT_ENABLE & GMAC_INT_TSIE)
+ init_waitqueue_head(&priv->tstamp_busy_wait);
+}
+
+static void dwmac4_rx_queue_enable(struct mac_device_info *hw,
+ u8 mode, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_RXQ_CTRL0);
+
+ value &= GMAC_RX_QUEUE_CLEAR(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= GMAC_RX_AV_QUEUE_ENABLE(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= GMAC_RX_DCB_QUEUE_ENABLE(queue);
+
+ writel(value, ioaddr + GMAC_RXQ_CTRL0);
+}
+
+static void dwmac4_rx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 base_register;
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_RXQ_CTRL2 : GMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + base_register);
+
+ value &= ~GMAC_RXQCTRL_PSRQX_MASK(queue);
+ value |= (prio << GMAC_RXQCTRL_PSRQX_SHIFT(queue)) &
+ GMAC_RXQCTRL_PSRQX_MASK(queue);
+ writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_tx_queue_priority(struct mac_device_info *hw,
+ u32 prio, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 base_register;
+ u32 value;
+
+ base_register = (queue < 4) ? GMAC_TXQ_PRTY_MAP0 : GMAC_TXQ_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + base_register);
+
+ value &= ~GMAC_TXQCTRL_PSTQX_MASK(queue);
+ value |= (prio << GMAC_TXQCTRL_PSTQX_SHIFT(queue)) &
+ GMAC_TXQCTRL_PSTQX_MASK(queue);
+
+ writel(value, ioaddr + base_register);
+}
+
+static void dwmac4_rx_queue_routing(struct mac_device_info *hw,
+ u8 packet, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ static const struct stmmac_rx_routing route_possibilities[] = {
+ { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT },
+ { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT },
+ { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT },
+ { GMAC_RXQCTRL_UPQ_MASK, GMAC_RXQCTRL_UPQ_SHIFT },
+ { GMAC_RXQCTRL_MCBCQ_MASK, GMAC_RXQCTRL_MCBCQ_SHIFT },
+ };
+
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+
+ /* routing configuration */
+ value &= ~route_possibilities[packet - 1].reg_mask;
+	value |= (queue << route_possibilities[packet - 1].reg_shift) &
+ route_possibilities[packet - 1].reg_mask;
+
+ /* some packets require extra ops */
+ if (packet == PACKET_AVCPQ) {
+ value &= ~GMAC_RXQCTRL_TACPQE;
+ value |= 0x1 << GMAC_RXQCTRL_TACPQE_SHIFT;
+ } else if (packet == PACKET_MCBCQ) {
+ value &= ~GMAC_RXQCTRL_MCBCQEN;
+ value |= 0x1 << GMAC_RXQCTRL_MCBCQEN_SHIFT;
+ }
+
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+}
+
+static void dwmac4_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_RAA;
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ value |= MTL_OPERATION_RAA_SP;
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= MTL_OPERATION_RAA_WSP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_OPERATION_MODE);
+
+ value &= ~MTL_OPERATION_SCHALG_MASK;
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= MTL_OPERATION_SCHALG_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= MTL_OPERATION_SCHALG_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= MTL_OPERATION_SCHALG_DWRR;
+ break;
+ case MTL_TX_ALGORITHM_SP:
+ value |= MTL_OPERATION_SCHALG_SP;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + MTL_OPERATION_MODE);
+}
+
+static void dwmac4_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+
+ value &= ~MTL_TXQ_WEIGHT_ISCQW_MASK;
+ value |= weight & MTL_TXQ_WEIGHT_ISCQW_MASK;
+ writel(value, ioaddr + MTL_TXQX_WEIGHT_BASE_ADDR(queue));
+}
+
+static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (queue < 4)
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ value = readl(ioaddr + MTL_RXQ_DMA_MAP1);
+
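+ /* Queue 0 (in MAP0) and queue 4 (in MAP1) use the dedicated Q0/Q4
+ * mapping field; the remaining queues use the generic per-queue
+ * fields of their register.
+ */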
+ if (queue == 0 || queue == 4) {
+ value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK;
+ value |= MTL_RXQ_DMA_Q04MDMACH(chan);
+ } else if (queue > 4) {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4);
+ } else {
+ value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue);
+ value |= MTL_RXQ_DMA_QXMDMACH(chan, queue);
+ }
+
+ if (queue < 4)
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP0);
+ else
+ writel(value, ioaddr + MTL_RXQ_DMA_MAP1);
+}
+
+static void dwmac4_config_cbs(struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope,
+ u32 high_credit, u32 low_credit, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ pr_debug("Queue %d configured as AVB. Parameters:\n", queue);
+ pr_debug("\tsend_slope: 0x%08x\n", send_slope);
+ pr_debug("\tidle_slope: 0x%08x\n", idle_slope);
+ pr_debug("\thigh_credit: 0x%08x\n", high_credit);
+ pr_debug("\tlow_credit: 0x%08x\n", low_credit);
+
+ /* enable AV algorithm */
+ value = readl(ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+ value |= MTL_ETS_CTRL_AVALG;
+ value |= MTL_ETS_CTRL_CC;
+ writel(value, ioaddr + MTL_ETSX_CTRL_BASE_ADDR(queue));
+
+ /* configure send slope */
+ value = readl(ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_SEND_SLP_CRED_SSC_MASK;
+ value |= send_slope & MTL_SEND_SLP_CRED_SSC_MASK;
+ writel(value, ioaddr + MTL_SEND_SLP_CREDX_BASE_ADDR(queue));
+
+ /* configure idle slope (same register as tx weight) */
+ dwmac4_set_mtl_tx_queue_weight(hw, idle_slope, queue);
+
+ /* configure high credit */
+ value = readl(ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_HIGH_CRED_HC_MASK;
+ value |= high_credit & MTL_HIGH_CRED_HC_MASK;
+ writel(value, ioaddr + MTL_HIGH_CREDX_BASE_ADDR(queue));
+
+ /* configure low credit */
+ value = readl(ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+ value &= ~MTL_HIGH_CRED_LC_MASK;
+ value |= low_credit & MTL_HIGH_CRED_LC_MASK;
+ writel(value, ioaddr + MTL_LOW_CREDX_BASE_ADDR(queue));
+}
+
+static void dwmac4_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < GMAC_REG_NUM; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
+static int dwmac4_rx_ipc_enable(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (hw->rx_csum)
+ value |= GMAC_CONFIG_IPC;
+ else
+ value &= ~GMAC_CONFIG_IPC;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+
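+ /* Read the bit back: it stays clear if the core lacks RX checksum
+ * offload, so the caller can detect missing support.
+ */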
+ value = readl(ioaddr + GMAC_CONFIG);
+
+ return !!(value & GMAC_CONFIG_IPC);
+}
+
+static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int pmt = 0;
+ u32 config;
+
+ if (mode & WAKE_MAGIC) {
+ pr_debug("GMAC: WOL Magic frame\n");
+ pmt |= power_down | magic_pkt_en;
+ }
+ if (mode & WAKE_UCAST) {
+ pr_debug("GMAC: WOL on global unicast\n");
+ pmt |= power_down | global_unicast | wake_up_frame_en;
+ }
+
+ if (pmt) {
+ /* The receiver must be enabled for WOL before powering down */
+ config = readl(ioaddr + GMAC_CONFIG);
+ config |= GMAC_CONFIG_RE;
+ writel(config, ioaddr + GMAC_CONFIG);
+ }
+ writel(pmt, ioaddr + GMAC_PMT);
+}
+
+static void dwmac4_set_umac_addr(struct mac_device_info *hw,
+ const unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ stmmac_dwmac4_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
+ GMAC_ADDR_LOW(reg_n));
+}
+
+static void dwmac4_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ /* Enable link status reception on the RGMII, SGMII or SMII
+ * receive path and instruct the transmitter to enter the LPI
+ * state.
+ */
+ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ value |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+
+ if (en_tx_lpi_clockgating)
+ value |= GMAC4_LPI_CTRL_STATUS_LPITCSE;
+
+ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ value &= ~(GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA);
+ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ if (link)
+ value |= GMAC4_LPI_CTRL_STATUS_PLS;
+ else
+ value &= ~GMAC4_LPI_CTRL_STATUS_PLS;
+
+ writel(value, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_lpi_entry_timer(struct mac_device_info *hw, int et)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int value = et & STMMAC_ET_MAX;
+ int regval;
+
+ /* Program LPI entry timer value into register */
+ writel(value, ioaddr + GMAC4_LPI_ENTRY_TIMER);
+
+ /* Enable/disable LPI entry timer */
+ regval = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+ regval |= GMAC4_LPI_CTRL_STATUS_LPIEN | GMAC4_LPI_CTRL_STATUS_LPITXA;
+
+ if (et)
+ regval |= GMAC4_LPI_CTRL_STATUS_LPIATE;
+ else
+ regval &= ~GMAC4_LPI_CTRL_STATUS_LPIATE;
+
+ writel(regval, ioaddr + GMAC4_LPI_CTRL_STATUS);
+}
+
+static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
+
+ /* Program the timers in the LPI timer control register:
+ * LS: minimum time (ms) for which the link
+ * status from PHY should be ok before transmitting
+ * the LPI pattern.
+ * TW: minimum time (us) for which the core waits
+ * after it has stopped transmitting the LPI pattern.
+ */
+ writel(value, ioaddr + GMAC4_LPI_TIMER_CTRL);
+}
+
+static void dwmac4_write_single_vlan(struct net_device *dev, u16 vid)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 val;
+
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ val &= ~GMAC_VLAN_TAG_VID;
+ val |= GMAC_VLAN_TAG_ETV | vid;
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+}
+
+static int dwmac4_write_vlan_filter(struct net_device *dev,
+ struct mac_device_info *hw,
+ u8 index, u32 data)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int i, timeout = 10;
+ u32 val;
+
+ if (index >= hw->num_vlan)
+ return -EINVAL;
+
+ writel(data, ioaddr + GMAC_VLAN_TAG_DATA);
+
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ val &= ~(GMAC_VLAN_TAG_CTRL_OFS_MASK |
+ GMAC_VLAN_TAG_CTRL_CT |
+ GMAC_VLAN_TAG_CTRL_OB);
+ val |= (index << GMAC_VLAN_TAG_CTRL_OFS_SHIFT) | GMAC_VLAN_TAG_CTRL_OB;
+
+ writel(val, ioaddr + GMAC_VLAN_TAG);
+
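+ /* Poll the OB (Operation Busy) bit: hardware clears it once the
+ * indirect filter write has completed.
+ */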
+ for (i = 0; i < timeout; i++) {
+ val = readl(ioaddr + GMAC_VLAN_TAG);
+ if (!(val & GMAC_VLAN_TAG_CTRL_OB))
+ return 0;
+ udelay(1);
+ }
+
+ netdev_err(dev, "Timeout accessing MAC_VLAN_Tag_Filter\n");
+
+ return -EBUSY;
+}
+
+static int dwmac4_add_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int index = -1;
+ u32 val = 0;
+ int i, ret;
+
+ if (vid > 4095)
+ return -EINVAL;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ /* For single VLAN filter, VID 0 means VLAN promiscuous */
+ if (vid == 0) {
+ netdev_warn(dev, "Adding VLAN ID 0 is not supported\n");
+ return -EPERM;
+ }
+
+ if (hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) {
+ netdev_err(dev, "Only single VLAN ID supported\n");
+ return -EPERM;
+ }
+
+ hw->vlan_filter[0] = vid;
+ dwmac4_write_single_vlan(dev, vid);
+
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ val |= GMAC_VLAN_TAG_DATA_ETV | GMAC_VLAN_TAG_DATA_VEN | vid;
+
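+ /* Reuse an identical entry if one exists; otherwise remember a
+ * slot with the VEN (enable) bit clear as a free candidate.
+ */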
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] == val)
+ return 0;
+ else if (!(hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN))
+ index = i;
+ }
+
+ if (index == -1) {
+ netdev_err(dev, "MAC_VLAN_Tag_Filter full (size: %0u)\n",
+ hw->num_vlan);
+ return -EPERM;
+ }
+
+ ret = dwmac4_write_vlan_filter(dev, hw, index, val);
+
+ if (!ret)
+ hw->vlan_filter[index] = val;
+
+ return ret;
+}
+
+static int dwmac4_del_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid)
+{
+ int i, ret = 0;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ if ((hw->vlan_filter[0] & GMAC_VLAN_TAG_VID) == vid) {
+ hw->vlan_filter[0] = 0;
+ dwmac4_write_single_vlan(dev, 0);
+ }
+ return 0;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if ((hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VID) == vid) {
+ ret = dwmac4_write_vlan_filter(dev, hw, i, 0);
+
+ if (!ret)
+ hw->vlan_filter[i] = 0;
+ else
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void dwmac4_restore_hw_vlan_rx_fltr(struct net_device *dev,
+ struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ u32 hash;
+ u32 val;
+ int i;
+
+ /* Single Rx VLAN Filter */
+ if (hw->num_vlan == 1) {
+ dwmac4_write_single_vlan(dev, hw->vlan_filter[0]);
+ return;
+ }
+
+ /* Extended Rx VLAN Filter Enable */
+ for (i = 0; i < hw->num_vlan; i++) {
+ if (hw->vlan_filter[i] & GMAC_VLAN_TAG_DATA_VEN) {
+ val = hw->vlan_filter[i];
+ dwmac4_write_vlan_filter(dev, hw, i, val);
+ }
+ }
+
+ hash = readl(ioaddr + GMAC_VLAN_HASH_TABLE);
+ if (hash & GMAC_VLAN_VLHT) {
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+ value |= GMAC_VLAN_VTHM;
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
+}
+
+static void dwmac4_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ int numhashregs = (hw->multicast_filter_bins >> 5);
+ int mcbitslog2 = hw->mcast_bits_log2;
+ unsigned int value;
+ u32 mc_filter[8];
+ int i;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value &= ~GMAC_PACKET_FILTER_HMC;
+ value &= ~GMAC_PACKET_FILTER_HPF;
+ value &= ~GMAC_PACKET_FILTER_PCF;
+ value &= ~GMAC_PACKET_FILTER_PM;
+ value &= ~GMAC_PACKET_FILTER_PR;
+ value &= ~GMAC_PACKET_FILTER_RA;
+ if (dev->flags & IFF_PROMISC) {
+ /* VLAN Tag Filter Fail Packets Queuing */
+ if (hw->vlan_fail_q_en) {
+ value = readl(ioaddr + GMAC_RXQ_CTRL4);
+ value &= ~GMAC_RXQCTRL_VFFQ_MASK;
+ value |= GMAC_RXQCTRL_VFFQE |
+ (hw->vlan_fail_q << GMAC_RXQCTRL_VFFQ_SHIFT);
+ writel(value, ioaddr + GMAC_RXQ_CTRL4);
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_RA;
+ } else {
+ value = GMAC_PACKET_FILTER_PR | GMAC_PACKET_FILTER_PCF;
+ }
+
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
+ /* Pass all multi */
+ value |= GMAC_PACKET_FILTER_PM;
+ /* Set all the bits of the HASH tab */
+ memset(mc_filter, 0xff, sizeof(mc_filter));
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
+ struct netdev_hw_addr *ha;
+
+ /* Hash filter for multicast */
+ value |= GMAC_PACKET_FILTER_HMC;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ /* The upper n bits of the calculated CRC are used to
+ * index the contents of the hash table. The number of
+ * bits used depends on the hardware configuration
+ * selected at core configuration time.
+ */
+ u32 bit_nr = bitrev32(~crc32_le(~0, ha->addr,
+ ETH_ALEN)) >> (32 - mcbitslog2);
+ /* The most significant bit determines the register to
+ * use (H/L) while the other 5 bits determine the bit
+ * within the register.
+ */
+ mc_filter[bit_nr >> 5] |= (1 << (bit_nr & 0x1f));
+ }
+ }
+
+ for (i = 0; i < numhashregs; i++)
+ writel(mc_filter[i], ioaddr + GMAC_HASH_TAB(i));
+
+ value |= GMAC_PACKET_FILTER_HPF;
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
+ /* Switch to promiscuous mode if more than 128 addrs
+ * are required
+ */
+ value |= GMAC_PACKET_FILTER_PR;
+ } else {
+ struct netdev_hw_addr *ha;
+ int reg = 1;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwmac4_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+
+ while (reg < GMAC_MAX_PERFECT_ADDRESSES) {
+ writel(0, ioaddr + GMAC_ADDR_HIGH(reg));
+ writel(0, ioaddr + GMAC_ADDR_LOW(reg));
+ reg++;
+ }
+ }
+
+ /* VLAN filtering */
+ if (dev->flags & IFF_PROMISC && !hw->vlan_fail_q_en)
+ value &= ~GMAC_PACKET_FILTER_VTFE;
+ else if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ value |= GMAC_PACKET_FILTER_VTFE;
+
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+}
+
+static void dwmac4_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ unsigned int flow = 0;
+ u32 queue = 0;
+
+ pr_debug("GMAC Flow-Control:\n");
+ if (fc & FLOW_RX) {
+ pr_debug("\tReceive Flow-Control ON\n");
+ flow |= GMAC_RX_FLOW_CTRL_RFE;
+ } else {
+ pr_debug("\tReceive Flow-Control OFF\n");
+ }
+ writel(flow, ioaddr + GMAC_RX_FLOW_CTRL);
+
+ if (fc & FLOW_TX) {
+ pr_debug("\tTransmit Flow-Control ON\n");
+
+ if (duplex)
+ pr_debug("\tduplex mode: PAUSE %d\n", pause_time);
+
+ for (queue = 0; queue < tx_cnt; queue++) {
+ flow = GMAC_TX_FLOW_CTRL_TFE;
+
+ if (duplex)
+ flow |=
+ (pause_time << GMAC_TX_FLOW_CTRL_PT_SHIFT);
+
+ writel(flow, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
+ }
+ } else {
+ for (queue = 0; queue < tx_cnt; queue++)
+ writel(0, ioaddr + GMAC_QX_TX_FLOW_CTRL(queue));
+ }
+}
+
+static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback)
+{
+ dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback);
+}
+
+static void dwmac4_rane(void __iomem *ioaddr, bool restart)
+{
+ dwmac_rane(ioaddr, GMAC_PCS_BASE, restart);
+}
+
+static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv)
+{
+ dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv);
+}
+
+/* RGMII or SMII interface */
+static void dwmac4_phystatus(void __iomem *ioaddr, struct stmmac_extra_stats *x)
+{
+ u32 status;
+
+ status = readl(ioaddr + GMAC_PHYIF_CONTROL_STATUS);
+ x->irq_rgmii_n++;
+
+ /* Check the link status */
+ if (status & GMAC_PHYIF_CTRLSTATUS_LNKSTS) {
+ int speed_value;
+
+ x->pcs_link = 1;
+
+ speed_value = ((status & GMAC_PHYIF_CTRLSTATUS_SPEED) >>
+ GMAC_PHYIF_CTRLSTATUS_SPEED_SHIFT);
+ if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_125)
+ x->pcs_speed = SPEED_1000;
+ else if (speed_value == GMAC_PHYIF_CTRLSTATUS_SPEED_25)
+ x->pcs_speed = SPEED_100;
+ else
+ x->pcs_speed = SPEED_10;
+
+ x->pcs_duplex = (status & GMAC_PHYIF_CTRLSTATUS_LNKMOD_MASK);
+
+ pr_info("Link is Up - %d/%s\n", (int)x->pcs_speed,
+ x->pcs_duplex ? "Full" : "Half");
+ } else {
+ x->pcs_link = 0;
+ pr_info("Link is Down\n");
+ }
+}
+
+static int dwmac4_irq_mtl_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 mtl_int_qx_status;
+ int ret = 0;
+
+ mtl_int_qx_status = readl(ioaddr + MTL_INT_STATUS);
+
+ /* Check MTL Interrupt */
+ if (mtl_int_qx_status & MTL_INT_QX(chan)) {
+ /* read Queue x Interrupt status */
+ u32 status = readl(ioaddr + MTL_CHAN_INT_CTRL(chan));
+
+ if (status & MTL_RX_OVERFLOW_INT) {
+ /* clear Interrupt */
+ writel(status | MTL_RX_OVERFLOW_INT,
+ ioaddr + MTL_CHAN_INT_CTRL(chan));
+ ret = CORE_IRQ_MTL_RX_OVERFLOW;
+ }
+ }
+
+ return ret;
+}
+
+static int dwmac4_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
+ u32 intr_enable = readl(ioaddr + GMAC_INT_EN);
+ int ret = 0;
+
+ /* Discard disabled bits */
+ intr_status &= intr_enable;
+
+ /* Unused events (e.g. MMC interrupts) are not handled. */
+ if (unlikely(intr_status & mmc_tx_irq))
+ x->mmc_tx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_irq))
+ x->mmc_rx_irq_n++;
+ if (unlikely(intr_status & mmc_rx_csum_offload_irq))
+ x->mmc_rx_csum_offload_irq_n++;
+ /* Clear the PMT bits 5 and 6 by reading the PMT status reg */
+ if (unlikely(intr_status & pmt_irq)) {
+ readl(ioaddr + GMAC_PMT);
+ x->irq_receive_pmt_irq_n++;
+ }
+
+ /* MAC tx/rx EEE LPI entry/exit interrupts */
+ if (intr_status & lpi_irq) {
+ /* Clear LPI interrupt by reading MAC_LPI_Control_Status */
+ u32 status = readl(ioaddr + GMAC4_LPI_CTRL_STATUS);
+
+ if (status & GMAC4_LPI_CTRL_STATUS_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (status & GMAC4_LPI_CTRL_STATUS_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (status & GMAC4_LPI_CTRL_STATUS_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (status & GMAC4_LPI_CTRL_STATUS_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ dwmac_pcs_isr(ioaddr, GMAC_PCS_BASE, intr_status, x);
+ if (intr_status & PCS_RGSMIIIS_IRQ)
+ dwmac4_phystatus(ioaddr, x);
+
+ return ret;
+}
+
+static void dwmac4_debug(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues)
+{
+ u32 value;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_TX_DEBUG(queue));
+
+ if (value & MTL_DEBUG_TXSTSFSTS)
+ x->mtl_tx_status_fifo_full++;
+ if (value & MTL_DEBUG_TXFSTS)
+ x->mtl_tx_fifo_not_empty++;
+ if (value & MTL_DEBUG_TWCSTS)
+ x->mmtl_fifo_ctrl++;
+ if (value & MTL_DEBUG_TRCSTS_MASK) {
+ u32 trcsts = (value & MTL_DEBUG_TRCSTS_MASK)
+ >> MTL_DEBUG_TRCSTS_SHIFT;
+ if (trcsts == MTL_DEBUG_TRCSTS_WRITE)
+ x->mtl_tx_fifo_read_ctrl_write++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_TXW)
+ x->mtl_tx_fifo_read_ctrl_wait++;
+ else if (trcsts == MTL_DEBUG_TRCSTS_READ)
+ x->mtl_tx_fifo_read_ctrl_read++;
+ else
+ x->mtl_tx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_TXPAUSED)
+ x->mac_tx_in_pause++;
+ }
+
+ for (queue = 0; queue < rx_queues; queue++) {
+ value = readl(ioaddr + MTL_CHAN_RX_DEBUG(queue));
+
+ if (value & MTL_DEBUG_RXFSTS_MASK) {
+ u32 rxfsts = (value & MTL_DEBUG_RXFSTS_MASK)
+ >> MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rxfsts == MTL_DEBUG_RXFSTS_FULL)
+ x->mtl_rx_fifo_fill_level_full++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_AT)
+ x->mtl_rx_fifo_fill_above_thresh++;
+ else if (rxfsts == MTL_DEBUG_RXFSTS_BT)
+ x->mtl_rx_fifo_fill_below_thresh++;
+ else
+ x->mtl_rx_fifo_fill_level_empty++;
+ }
+ if (value & MTL_DEBUG_RRCSTS_MASK) {
+ u32 rrcsts = (value & MTL_DEBUG_RRCSTS_MASK) >>
+ MTL_DEBUG_RRCSTS_SHIFT;
+
+ if (rrcsts == MTL_DEBUG_RRCSTS_FLUSH)
+ x->mtl_rx_fifo_read_ctrl_flush++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RSTAT)
+ x->mtl_rx_fifo_read_ctrl_read_data++;
+ else if (rrcsts == MTL_DEBUG_RRCSTS_RDATA)
+ x->mtl_rx_fifo_read_ctrl_status++;
+ else
+ x->mtl_rx_fifo_read_ctrl_idle++;
+ }
+ if (value & MTL_DEBUG_RWCSTS)
+ x->mtl_rx_fifo_ctrl_active++;
+ }
+
+ /* GMAC debug */
+ value = readl(ioaddr + GMAC_DEBUG);
+
+ if (value & GMAC_DEBUG_TFCSTS_MASK) {
+ u32 tfcsts = (value & GMAC_DEBUG_TFCSTS_MASK)
+ >> GMAC_DEBUG_TFCSTS_SHIFT;
+
+ if (tfcsts == GMAC_DEBUG_TFCSTS_XFER)
+ x->mac_tx_frame_ctrl_xfer++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_GEN_PAUSE)
+ x->mac_tx_frame_ctrl_pause++;
+ else if (tfcsts == GMAC_DEBUG_TFCSTS_WAIT)
+ x->mac_tx_frame_ctrl_wait++;
+ else
+ x->mac_tx_frame_ctrl_idle++;
+ }
+ if (value & GMAC_DEBUG_TPESTS)
+ x->mac_gmii_tx_proto_engine++;
+ if (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ x->mac_rx_frame_ctrl_fifo = (value & GMAC_DEBUG_RFCFCSTS_MASK)
+ >> GMAC_DEBUG_RFCFCSTS_SHIFT;
+ if (value & GMAC_DEBUG_RPESTS)
+ x->mac_gmii_rx_proto_engine++;
+}
+
+static void dwmac4_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ if (enable)
+ value |= GMAC_CONFIG_LM;
+ else
+ value &= ~GMAC_CONFIG_LM;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+static void dwmac4_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ __le16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(hash, ioaddr + GMAC_VLAN_HASH_TABLE);
+
+ value = readl(ioaddr + GMAC_VLAN_TAG);
+
+ if (hash) {
+ value |= GMAC_VLAN_VTHM | GMAC_VLAN_ETV;
+ if (is_double) {
+ value |= GMAC_VLAN_EDVLP;
+ value |= GMAC_VLAN_ESVL;
+ value |= GMAC_VLAN_DOVLTC;
+ }
+
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = GMAC_VLAN_ETV;
+
+ if (is_double) {
+ value |= GMAC_VLAN_EDVLP;
+ value |= GMAC_VLAN_ESVL;
+ value |= GMAC_VLAN_DOVLTC;
+ }
+
+ writel(value | perfect_match, ioaddr + GMAC_VLAN_TAG);
+ } else {
+ value &= ~(GMAC_VLAN_VTHM | GMAC_VLAN_ETV);
+ value &= ~(GMAC_VLAN_EDVLP | GMAC_VLAN_ESVL);
+ value &= ~GMAC_VLAN_DOVLTC;
+ value &= ~GMAC_VLAN_VID;
+
+ writel(value, ioaddr + GMAC_VLAN_TAG);
+ }
+}
+
+static void dwmac4_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+
+ value &= ~GMAC_CONFIG_SARC;
+ value |= val << GMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+static void dwmac4_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_VLAN_INCL);
+ value |= GMAC_VLAN_VLTI;
+ value |= GMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~GMAC_VLAN_VLC;
+ value |= (type << GMAC_VLAN_VLC_SHIFT) & GMAC_VLAN_VLC;
+ writel(value, ioaddr + GMAC_VLAN_INCL);
+}
+
+static void dwmac4_set_arp_offload(struct mac_device_info *hw, bool en,
+ u32 addr)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(addr, ioaddr + GMAC_ARP_ADDR);
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ if (en)
+ value |= GMAC_CONFIG_ARPEN;
+ else
+ value &= ~GMAC_CONFIG_ARPEN;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+static int dwmac4_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ /* For IPv6, the SA and DA filters cannot both be active */
+ if (ipv6) {
+ value |= GMAC_L3PEN0;
+ value &= ~(GMAC_L3SAM0 | GMAC_L3SAIM0);
+ value &= ~(GMAC_L3DAM0 | GMAC_L3DAIM0);
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ } else {
+ value &= ~GMAC_L3PEN0;
+ if (sa) {
+ value |= GMAC_L3SAM0;
+ if (inv)
+ value |= GMAC_L3SAIM0;
+ } else {
+ value |= GMAC_L3DAM0;
+ if (inv)
+ value |= GMAC_L3DAIM0;
+ }
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa)
+ writel(match, ioaddr + GMAC_L3_ADDR0(filter_no));
+ else
+ writel(match, ioaddr + GMAC_L3_ADDR1(filter_no));
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
+static int dwmac4_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + GMAC_PACKET_FILTER);
+ value |= GMAC_PACKET_FILTER_IPFE;
+ writel(value, ioaddr + GMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + GMAC_L3L4_CTRL(filter_no));
+ if (udp)
+ value |= GMAC_L4PEN0;
+ else
+ value &= ~GMAC_L4PEN0;
+
+ value &= ~(GMAC_L4SPM0 | GMAC_L4SPIM0);
+ value &= ~(GMAC_L4DPM0 | GMAC_L4DPIM0);
+ if (sa) {
+ value |= GMAC_L4SPM0;
+ if (inv)
+ value |= GMAC_L4SPIM0;
+ } else {
+ value |= GMAC_L4DPM0;
+ if (inv)
+ value |= GMAC_L4DPIM0;
+ }
+
+ writel(value, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ if (sa)
+ value = match & GMAC_L4SP0;
+ else
+ value = (match << GMAC_L4DP0_SHIFT) & GMAC_L4DP0;
+
+ writel(value, ioaddr + GMAC_L4_ADDR(filter_no));
+
+ if (!en)
+ writel(0, ioaddr + GMAC_L3L4_CTRL(filter_no));
+
+ return 0;
+}
+
+const struct stmmac_ops dwmac4_ops = {
+ .core_init = dwmac4_core_init,
+ .set_mac = stmmac_set_mac,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .set_eee_mode = dwmac4_set_eee_mode,
+ .reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_eee_timer = dwmac4_set_eee_timer,
+ .set_eee_pls = dwmac4_set_eee_pls,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
+ .update_vlan_hash = dwmac4_update_vlan_hash,
+ .sarc_configure = dwmac4_sarc_configure,
+ .enable_vlan = dwmac4_enable_vlan,
+ .set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+};
+
+const struct stmmac_ops dwmac410_ops = {
+ .core_init = dwmac4_core_init,
+ .set_mac = stmmac_dwmac4_set_mac,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .set_eee_mode = dwmac4_set_eee_mode,
+ .reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_eee_timer = dwmac4_set_eee_timer,
+ .set_eee_pls = dwmac4_set_eee_pls,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+ .flex_pps_config = dwmac5_flex_pps_config,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
+ .update_vlan_hash = dwmac4_update_vlan_hash,
+ .sarc_configure = dwmac4_sarc_configure,
+ .enable_vlan = dwmac4_enable_vlan,
+ .set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
+ .fpe_configure = dwmac5_fpe_configure,
+ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
+ .fpe_irq_status = dwmac5_fpe_irq_status,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+};
+
+const struct stmmac_ops dwmac510_ops = {
+ .core_init = dwmac4_core_init,
+ .set_mac = stmmac_dwmac4_set_mac,
+ .rx_ipc = dwmac4_rx_ipc_enable,
+ .rx_queue_enable = dwmac4_rx_queue_enable,
+ .rx_queue_prio = dwmac4_rx_queue_priority,
+ .tx_queue_prio = dwmac4_tx_queue_priority,
+ .rx_queue_routing = dwmac4_rx_queue_routing,
+ .prog_mtl_rx_algorithms = dwmac4_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwmac4_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwmac4_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwmac4_map_mtl_dma,
+ .config_cbs = dwmac4_config_cbs,
+ .dump_regs = dwmac4_dump_regs,
+ .host_irq_status = dwmac4_irq_status,
+ .host_mtl_irq_status = dwmac4_irq_mtl_status,
+ .flow_ctrl = dwmac4_flow_ctrl,
+ .pmt = dwmac4_pmt,
+ .set_umac_addr = dwmac4_set_umac_addr,
+ .get_umac_addr = dwmac4_get_umac_addr,
+ .set_eee_mode = dwmac4_set_eee_mode,
+ .reset_eee_mode = dwmac4_reset_eee_mode,
+ .set_eee_lpi_entry_timer = dwmac4_set_eee_lpi_entry_timer,
+ .set_eee_timer = dwmac4_set_eee_timer,
+ .set_eee_pls = dwmac4_set_eee_pls,
+ .pcs_ctrl_ane = dwmac4_ctrl_ane,
+ .pcs_rane = dwmac4_rane,
+ .pcs_get_adv_lp = dwmac4_get_adv_lp,
+ .debug = dwmac4_debug,
+ .set_filter = dwmac4_set_filter,
+ .safety_feat_config = dwmac5_safety_feat_config,
+ .safety_feat_irq_status = dwmac5_safety_feat_irq_status,
+ .safety_feat_dump = dwmac5_safety_feat_dump,
+ .rxp_config = dwmac5_rxp_config,
+ .flex_pps_config = dwmac5_flex_pps_config,
+ .set_mac_loopback = dwmac4_set_mac_loopback,
+ .update_vlan_hash = dwmac4_update_vlan_hash,
+ .sarc_configure = dwmac4_sarc_configure,
+ .enable_vlan = dwmac4_enable_vlan,
+ .set_arp_offload = dwmac4_set_arp_offload,
+ .config_l3_filter = dwmac4_config_l3_filter,
+ .config_l4_filter = dwmac4_config_l4_filter,
+ .est_configure = dwmac5_est_configure,
+ .est_irq_status = dwmac5_est_irq_status,
+ .fpe_configure = dwmac5_fpe_configure,
+ .fpe_send_mpacket = dwmac5_fpe_send_mpacket,
+ .fpe_irq_status = dwmac5_fpe_irq_status,
+ .add_hw_vlan_rx_fltr = dwmac4_add_hw_vlan_rx_fltr,
+ .del_hw_vlan_rx_fltr = dwmac4_del_hw_vlan_rx_fltr,
+ .restore_hw_vlan_rx_fltr = dwmac4_restore_hw_vlan_rx_fltr,
+};
+
+static u32 dwmac4_get_num_vlan(void __iomem *ioaddr)
+{
+ u32 val, num_vlan;
+
+ val = readl(ioaddr + GMAC_HW_FEATURE3);
+ switch (val & GMAC_HW_FEAT_NRVF) {
+ case 0:
+ num_vlan = 1;
+ break;
+ case 1:
+ num_vlan = 4;
+ break;
+ case 2:
+ num_vlan = 8;
+ break;
+ case 3:
+ num_vlan = 16;
+ break;
+ case 4:
+ num_vlan = 24;
+ break;
+ case 5:
+ num_vlan = 32;
+ break;
+ default:
+ num_vlan = 1;
+ }
+
+ return num_vlan;
+}
+
+int dwmac4_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tDWMAC4/5\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
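+ /* PS selects the 10/100 (MII) interface and FES then picks 100 vs
+ * 10; with both bits clear the core runs at 1000. FES alone is
+ * used for 2500 on cores that support it.
+ */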
+ mac->link.duplex = GMAC_CONFIG_DM;
+ mac->link.speed10 = GMAC_CONFIG_PS;
+ mac->link.speed100 = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
+ mac->link.speed1000 = 0;
+ mac->link.speed2500 = GMAC_CONFIG_FES;
+ mac->link.speed_mask = GMAC_CONFIG_FES | GMAC_CONFIG_PS;
+ mac->mii.addr = GMAC_MDIO_ADDR;
+ mac->mii.data = GMAC_MDIO_DATA;
+ mac->mii.addr_shift = 21;
+ mac->mii.addr_mask = GENMASK(25, 21);
+ mac->mii.reg_shift = 16;
+ mac->mii.reg_mask = GENMASK(20, 16);
+ mac->mii.clk_csr_shift = 8;
+ mac->mii.clk_csr_mask = GENMASK(11, 8);
+ mac->num_vlan = dwmac4_get_num_vlan(priv->ioaddr);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
new file mode 100644
index 000000000..8cc80b1db
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.c
@@ -0,0 +1,585 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This contains the functions to handle the descriptors for DesignWare databook
+ * 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwmac4.h"
+#include "dwmac4_descs.h"
+
+static int dwmac4_wrback_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p,
+ void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes3;
+ int ret = tx_done;
+
+ tdes3 = le32_to_cpu(p->des3);
+
+ /* Get tx owner first */
+ if (unlikely(tdes3 & TDES3_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes3 & TDES3_LAST_DESCRIPTOR)))
+ return tx_not_ls;
+
+ if (unlikely(tdes3 & TDES3_ERROR_SUMMARY)) {
+ ret = tx_err;
+
+ if (unlikely(tdes3 & TDES3_JABBER_TIMEOUT))
+ x->tx_jabber++;
+ if (unlikely(tdes3 & TDES3_PACKET_FLUSHED))
+ x->tx_frame_flushed++;
+ if (unlikely(tdes3 & TDES3_LOSS_CARRIER)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(tdes3 & TDES3_NO_CARRIER)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((tdes3 & TDES3_LATE_COLLISION) ||
+ (tdes3 & TDES3_EXCESSIVE_COLLISION)))
+ stats->collisions +=
+ (tdes3 & TDES3_COLLISION_COUNT_MASK)
+ >> TDES3_COLLISION_COUNT_SHIFT;
+
+ if (unlikely(tdes3 & TDES3_EXCESSIVE_DEFERRAL))
+ x->tx_deferred++;
+
+ if (unlikely(tdes3 & TDES3_UNDERFLOW_ERROR)) {
+ x->tx_underflow++;
+ ret |= tx_err_bump_tc;
+ }
+
+ if (unlikely(tdes3 & TDES3_IP_HDR_ERROR))
+ x->tx_ip_header_error++;
+
+ if (unlikely(tdes3 & TDES3_PAYLOAD_ERROR))
+ x->tx_payload_error++;
+ }
+
+ if (unlikely(tdes3 & TDES3_DEFERRED))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes2 = le32_to_cpu(p->des2);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int message_type;
+ int ret = good_frame;
+
+ if (unlikely(rdes3 & RDES3_OWN))
+ return dma_own;
+
+ if (unlikely(rdes3 & RDES3_CONTEXT_DESCRIPTOR))
+ return discard_frame;
+ if (likely(!(rdes3 & RDES3_LAST_DESCRIPTOR)))
+ return rx_not_ls;
+
+ if (unlikely(rdes3 & RDES3_ERROR_SUMMARY)) {
+ if (unlikely(rdes3 & RDES3_GIANT_PACKET))
+ stats->rx_length_errors++;
+ if (unlikely(rdes3 & RDES3_OVERFLOW_ERROR))
+ x->rx_gmac_overflow++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_WATCHDOG))
+ x->rx_watchdog++;
+
+ if (unlikely(rdes3 & RDES3_RECEIVE_ERROR))
+ x->rx_mii++;
+
+ if (unlikely(rdes3 & RDES3_CRC_ERROR)) {
+ x->rx_crc_errors++;
+ stats->rx_crc_errors++;
+ }
+
+ if (unlikely(rdes3 & RDES3_DRIBBLE_ERROR))
+ x->dribbling_bit++;
+
+ ret = discard_frame;
+ }
+
+ message_type = (rdes1 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes1 & RDES1_IP_HDR_ERROR)
+ x->ip_hdr_err++;
+ if (rdes1 & RDES1_IP_CSUM_BYPASSED)
+ x->ip_csum_bypassed++;
+ if (rdes1 & RDES1_IPV4_HEADER)
+ x->ipv4_pkt_rcvd++;
+ if (rdes1 & RDES1_IPV6_HEADER)
+ x->ipv6_pkt_rcvd++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
+ else if (message_type == RDES_EXT_FOLLOW_UP)
+ x->ptp_rx_msg_type_follow_up++;
+ else if (message_type == RDES_EXT_DELAY_REQ)
+ x->ptp_rx_msg_type_delay_req++;
+ else if (message_type == RDES_EXT_DELAY_RESP)
+ x->ptp_rx_msg_type_delay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_REQ)
+ x->ptp_rx_msg_type_pdelay_req++;
+ else if (message_type == RDES_EXT_PDELAY_RESP)
+ x->ptp_rx_msg_type_pdelay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
+
+ if (rdes1 & RDES1_PTP_PACKET_TYPE)
+ x->ptp_frame_type++;
+ if (rdes1 & RDES1_PTP_VER)
+ x->ptp_ver++;
+ if (rdes1 & RDES1_TIMESTAMP_DROPPED)
+ x->timestamp_dropped++;
+
+ if (unlikely(rdes2 & RDES2_SA_FILTER_FAIL)) {
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes2 & RDES2_DA_FILTER_FAIL)) {
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+
+ if (rdes2 & RDES2_L3_FILTER_MATCH)
+ x->l3_filter_match++;
+ if (rdes2 & RDES2_L4_FILTER_MATCH)
+ x->l4_filter_match++;
+ if ((rdes2 & RDES2_L3_L4_FILT_NB_MATCH_MASK)
+ >> RDES2_L3_L4_FILT_NB_MATCH_SHIFT)
+ x->l3_l4_filter_no_match++;
+
+ return ret;
+}
+
+static int dwmac4_rd_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des2) & TDES2_BUFFER1_SIZE_MASK);
+}
+
+static int dwmac4_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & TDES3_OWN) >> TDES3_OWN_SHIFT;
+}
+
+static void dwmac4_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= cpu_to_le32(TDES3_OWN);
+}
+
+static void dwmac4_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des3 |= cpu_to_le32(RDES3_OWN | RDES3_BUFFER1_VALID_ADDR);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(RDES3_INT_ON_COMPLETION_EN);
+}
+
+static int dwmac4_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & TDES3_LAST_DESCRIPTOR)
+ >> TDES3_LAST_DESCRIPTOR_SHIFT;
+}
+
+static int dwmac4_wrback_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (le32_to_cpu(p->des3) & RDES3_PACKET_SIZE_MASK);
+}
+
+static void dwmac4_rd_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(TDES2_TIMESTAMP_ENABLE);
+}
+
+static int dwmac4_wrback_get_tx_timestamp_status(struct dma_desc *p)
+{
+ /* Context type from W/B descriptor must be zero */
+ if (le32_to_cpu(p->des3) & TDES3_CONTEXT_TYPE)
+ return 0;
+
+ /* Tx Timestamp Status is 1, so des0 and des1 hold valid values */
+ if (le32_to_cpu(p->des3) & TDES3_TIMESTAMP_STATUS)
+ return 1;
+
+ return 0;
+}
+
+static void dwmac4_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = le32_to_cpu(p->des0);
+ /* convert the high/seconds timestamp value to nanoseconds */
+ ns += le32_to_cpu(p->des1) * 1000000000ULL;
+
+ *ts = ns;
+}
+
+static int dwmac4_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ unsigned int rdes1 = le32_to_cpu(p->des1);
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 own, ctxt;
+ int ret = 1;
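+ /* 1: timestamp not yet ready, 0: valid timestamp latched,
+ * -EINVAL: corrupted context descriptor.
+ */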
+
+ own = rdes3 & RDES3_OWN;
+ ctxt = ((rdes3 & RDES3_CONTEXT_DESCRIPTOR)
+ >> RDES3_CONTEXT_DESCRIPTOR_SHIFT);
+
+ if (likely(!own && ctxt)) {
+ if ((rdes0 == 0xffffffff) && (rdes1 == 0xffffffff))
+ /* Corrupted value */
+ ret = -EINVAL;
+ else
+ /* A valid Timestamp is ready to be read */
+ ret = 0;
+ }
+
+ /* Timestamp not ready */
+ return ret;
+}
+
+static int dwmac4_wrback_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ int ret = -EINVAL;
+
+ /* Get the status from normal w/b descriptor */
+ if (likely(le32_to_cpu(p->des3) & RDES3_RDES1_VALID)) {
+ if (likely(le32_to_cpu(p->des1) & RDES1_TIMESTAMP_AVAILABLE)) {
+ int i = 0;
+
+ /* Check if timestamp is OK from context descriptor */
+ do {
+ ret = dwmac4_rx_check_timestamp(next_desc);
+ if (ret < 0)
+ goto exit;
+ i++;
+
+ } while ((ret == 1) && (i < 10));
+
+ if (i == 10)
+ ret = -EBUSY;
+ }
+ }
+exit:
+ if (likely(ret == 0))
+ return 1;
+
+ return 0;
+}
+
+static void dwmac4_rd_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end, int bfsize)
+{
+ dwmac4_set_rx_owner(p, disable_rx_ic);
+}
+
+static void dwmac4_rd_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ p->des2 |= cpu_to_le32(len & TDES2_BUFFER1_SIZE_MASK);
+
+ tdes3 |= tot_pkt_len & TDES3_PACKET_SIZE_MASK;
+ if (is_fs)
+ tdes3 |= TDES3_FIRST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+
+ if (likely(csum_flag))
+ tdes3 |= (TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes3 &= ~(TX_CIC_FULL << TDES3_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first descriptor of a
+ * frame, all other descriptors of the same frame must be written
+ * beforehand, to avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwmac4_rd_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ if (len1)
+ p->des2 |= cpu_to_le32((len1 & TDES2_BUFFER1_SIZE_MASK));
+
+ if (len2)
+ p->des2 |= cpu_to_le32((len2 << TDES2_BUFFER2_SIZE_MASK_SHIFT)
+ & TDES2_BUFFER2_SIZE_MASK);
+
+ if (is_fs) {
+ tdes3 |= TDES3_FIRST_DESCRIPTOR |
+ TDES3_TCP_SEGMENTATION_ENABLE |
+ ((tcphdrlen << TDES3_HDR_LEN_SHIFT) &
+ TDES3_SLOT_NUMBER_MASK) |
+ ((tcppayloadlen & TDES3_TCP_PKT_PAYLOAD_MASK));
+ } else {
+ tdes3 &= ~TDES3_FIRST_DESCRIPTOR;
+ }
+
+ if (ls)
+ tdes3 |= TDES3_LAST_DESCRIPTOR;
+ else
+ tdes3 &= ~TDES3_LAST_DESCRIPTOR;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first descriptor of a
+ * frame, all other descriptors of the same frame must be written
+ * beforehand, to avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwmac4_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_rd_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(TDES2_INTERRUPT_ON_COMPLETION);
+}
+
+static void dwmac4_display_ring(void *head, unsigned int size, bool rx,
+ dma_addr_t dma_rx_phy, unsigned int desc_size)
+{
+ dma_addr_t dma_addr;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ if (desc_size == sizeof(struct dma_desc)) {
+ struct dma_desc *p = (struct dma_desc *)head;
+
+ for (i = 0; i < size; i++) {
+ dma_addr = dma_rx_phy + i * sizeof(*p);
+ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ p++;
+ }
+ } else if (desc_size == sizeof(struct dma_extended_desc)) {
+ struct dma_extended_desc *extp = (struct dma_extended_desc *)head;
+
+ for (i = 0; i < size; i++) {
+ dma_addr = dma_rx_phy + i * sizeof(*extp);
+ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(extp->basic.des0), le32_to_cpu(extp->basic.des1),
+ le32_to_cpu(extp->basic.des2), le32_to_cpu(extp->basic.des3),
+ le32_to_cpu(extp->des4), le32_to_cpu(extp->des5),
+ le32_to_cpu(extp->des6), le32_to_cpu(extp->des7));
+ extp++;
+ }
+ } else if (desc_size == sizeof(struct dma_edesc)) {
+ struct dma_edesc *ep = (struct dma_edesc *)head;
+
+ for (i = 0; i < size; i++) {
+ dma_addr = dma_rx_phy + i * sizeof(*ep);
+ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(ep->des4), le32_to_cpu(ep->des5),
+ le32_to_cpu(ep->des6), le32_to_cpu(ep->des7),
+ le32_to_cpu(ep->basic.des0), le32_to_cpu(ep->basic.des1),
+ le32_to_cpu(ep->basic.des2), le32_to_cpu(ep->basic.des3));
+ ep++;
+ }
+ } else {
+ pr_err("unsupported descriptor!");
+ }
+}
+
+static void dwmac4_set_mss_ctxt(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = cpu_to_le32(mss);
+ p->des3 = cpu_to_le32(TDES3_CONTEXT_TYPE | TDES3_CTXT_TCMSSV);
+}
+
+static void dwmac4_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(lower_32_bits(addr));
+ p->des1 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwmac4_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwmac4_set_sarc(struct dma_desc *p, u32 sarc_type)
+{
+ sarc_type <<= TDES3_SA_INSERT_CTRL_SHIFT;
+
+ p->des3 |= cpu_to_le32(sarc_type & TDES3_SA_INSERT_CTRL_MASK);
+}
+
+static int set_16kib_bfsize(int mtu)
+{
+ int ret = 0;
+
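+ /* Returning 0 keeps the default buffer size; 16 KiB buffers are
+ * only needed once the MTU exceeds 8 KiB.
+ */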
+ if (unlikely(mtu >= BUF_SIZE_8KiB))
+ ret = BUF_SIZE_16KiB;
+ return ret;
+}
+
+static void dwmac4_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+
+ /* Inner VLAN */
+ if (inner_type) {
+ u32 des = inner_tag << TDES2_IVT_SHIFT;
+
+ des &= TDES2_IVT_MASK;
+ p->des2 = cpu_to_le32(des);
+
+ des = inner_type << TDES3_IVTIR_SHIFT;
+ des &= TDES3_IVTIR_MASK;
+ p->des3 = cpu_to_le32(des | TDES3_IVLTV);
+ }
+
+ /* Outer VLAN */
+ p->des3 |= cpu_to_le32(tag & TDES3_VLAN_TAG);
+ p->des3 |= cpu_to_le32(TDES3_VLTV);
+
+ p->des3 |= cpu_to_le32(TDES3_CONTEXT_TYPE);
+}
+
+static void dwmac4_set_vlan(struct dma_desc *p, u32 type)
+{
+ type <<= TDES2_VLAN_TAG_SHIFT;
+ p->des2 |= cpu_to_le32(type & TDES2_VLAN_TAG_MASK);
+}
+
+static void dwmac4_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ *len = le32_to_cpu(p->des2) & RDES2_HL;
+}
+
+static void dwmac4_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool buf2_valid)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr));
+
+ if (buf2_valid)
+ p->des3 |= cpu_to_le32(RDES3_BUFFER2_VALID_ADDR);
+ else
+ p->des3 &= cpu_to_le32(~RDES3_BUFFER2_VALID_ADDR);
+}
+
+static void dwmac4_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & TDES4_LT) | TDES4_LTV);
+ p->des5 = cpu_to_le32(nsec & TDES5_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
+const struct stmmac_desc_ops dwmac4_desc_ops = {
+ .tx_status = dwmac4_wrback_get_tx_status,
+ .rx_status = dwmac4_wrback_get_rx_status,
+ .get_tx_len = dwmac4_rd_get_tx_len,
+ .get_tx_owner = dwmac4_get_tx_owner,
+ .set_tx_owner = dwmac4_set_tx_owner,
+ .set_rx_owner = dwmac4_set_rx_owner,
+ .get_tx_ls = dwmac4_get_tx_ls,
+ .get_rx_frame_len = dwmac4_wrback_get_rx_frame_len,
+ .enable_tx_timestamp = dwmac4_rd_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwmac4_wrback_get_tx_timestamp_status,
+ .get_rx_timestamp_status = dwmac4_wrback_get_rx_timestamp_status,
+ .get_timestamp = dwmac4_get_timestamp,
+ .set_tx_ic = dwmac4_rd_set_tx_ic,
+ .prepare_tx_desc = dwmac4_rd_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwmac4_rd_prepare_tso_tx_desc,
+ .release_tx_desc = dwmac4_release_tx_desc,
+ .init_rx_desc = dwmac4_rd_init_rx_desc,
+ .init_tx_desc = dwmac4_rd_init_tx_desc,
+ .display_ring = dwmac4_display_ring,
+ .set_mss = dwmac4_set_mss_ctxt,
+ .set_addr = dwmac4_set_addr,
+ .clear = dwmac4_clear,
+ .set_sarc = dwmac4_set_sarc,
+ .set_vlan_tag = dwmac4_set_vlan_tag,
+ .set_vlan = dwmac4_set_vlan,
+ .get_rx_header_len = dwmac4_get_rx_header_len,
+ .set_sec_addr = dwmac4_set_sec_addr,
+ .set_tbs = dwmac4_set_tbs,
+};
+
+const struct stmmac_mode_ops dwmac4_ring_mode_ops = {
+ .set_16kib_bfsize = set_16kib_bfsize,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
new file mode 100644
index 000000000..6da070ccd
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_descs.h
@@ -0,0 +1,147 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Header File to describe the DMA descriptors and related definitions specific
+ * for DesignWare databook 4.xx.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DESCS_H__
+#define __DWMAC4_DESCS_H__
+
+#include <linux/bitops.h>
+
+/* Normal transmit descriptor defines (without split feature) */
+
+/* TDES2 (read format) */
+#define TDES2_BUFFER1_SIZE_MASK GENMASK(13, 0)
+#define TDES2_VLAN_TAG_MASK GENMASK(15, 14)
+#define TDES2_VLAN_TAG_SHIFT 14
+#define TDES2_BUFFER2_SIZE_MASK GENMASK(29, 16)
+#define TDES2_BUFFER2_SIZE_MASK_SHIFT 16
+#define TDES3_IVTIR_MASK GENMASK(19, 18)
+#define TDES3_IVTIR_SHIFT 18
+#define TDES3_IVLTV BIT(17)
+#define TDES2_TIMESTAMP_ENABLE BIT(30)
+#define TDES2_IVT_MASK GENMASK(31, 16)
+#define TDES2_IVT_SHIFT 16
+#define TDES2_INTERRUPT_ON_COMPLETION BIT(31)
+
+/* TDES3 (read format) */
+#define TDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define TDES3_VLAN_TAG GENMASK(15, 0)
+#define TDES3_VLTV BIT(16)
+#define TDES3_CHECKSUM_INSERTION_MASK GENMASK(17, 16)
+#define TDES3_CHECKSUM_INSERTION_SHIFT 16
+#define TDES3_TCP_PKT_PAYLOAD_MASK GENMASK(17, 0)
+#define TDES3_TCP_SEGMENTATION_ENABLE BIT(18)
+#define TDES3_HDR_LEN_SHIFT 19
+#define TDES3_SLOT_NUMBER_MASK GENMASK(22, 19)
+#define TDES3_SA_INSERT_CTRL_MASK GENMASK(25, 23)
+#define TDES3_SA_INSERT_CTRL_SHIFT 23
+#define TDES3_CRC_PAD_CTRL_MASK GENMASK(27, 26)
+
+/* TDES3 (write back format) */
+#define TDES3_IP_HDR_ERROR BIT(0)
+#define TDES3_DEFERRED BIT(1)
+#define TDES3_UNDERFLOW_ERROR BIT(2)
+#define TDES3_EXCESSIVE_DEFERRAL BIT(3)
+#define TDES3_COLLISION_COUNT_MASK GENMASK(7, 4)
+#define TDES3_COLLISION_COUNT_SHIFT 4
+#define TDES3_EXCESSIVE_COLLISION BIT(8)
+#define TDES3_LATE_COLLISION BIT(9)
+#define TDES3_NO_CARRIER BIT(10)
+#define TDES3_LOSS_CARRIER BIT(11)
+#define TDES3_PAYLOAD_ERROR BIT(12)
+#define TDES3_PACKET_FLUSHED BIT(13)
+#define TDES3_JABBER_TIMEOUT BIT(14)
+#define TDES3_ERROR_SUMMARY BIT(15)
+#define TDES3_TIMESTAMP_STATUS BIT(17)
+#define TDES3_TIMESTAMP_STATUS_SHIFT 17
+
+/* TDES3 context */
+#define TDES3_CTXT_TCMSSV BIT(26)
+
+/* TDES3 Common */
+#define TDES3_RS1V BIT(26)
+#define TDES3_RS1V_SHIFT 26
+#define TDES3_LAST_DESCRIPTOR BIT(28)
+#define TDES3_LAST_DESCRIPTOR_SHIFT 28
+#define TDES3_FIRST_DESCRIPTOR BIT(29)
+#define TDES3_CONTEXT_TYPE BIT(30)
+#define TDES3_CONTEXT_TYPE_SHIFT 30
+
+/* TDES4 */
+#define TDES4_LTV BIT(31)
+#define TDES4_LT GENMASK(7, 0)
+
+/* TDES5 */
+#define TDES5_LT GENMASK(31, 8)
+
+/* TDES3 used by both formats (read and write back) */
+#define TDES3_OWN BIT(31)
+#define TDES3_OWN_SHIFT 31
+
+/* Normal receive descriptor defines (without split feature) */
+
+/* RDES0 (write back format) */
+#define RDES0_VLAN_TAG_MASK GENMASK(15, 0)
+
+/* RDES1 (write back format) */
+#define RDES1_IP_PAYLOAD_TYPE_MASK GENMASK(2, 0)
+#define RDES1_IP_HDR_ERROR BIT(3)
+#define RDES1_IPV4_HEADER BIT(4)
+#define RDES1_IPV6_HEADER BIT(5)
+#define RDES1_IP_CSUM_BYPASSED BIT(6)
+#define RDES1_IP_CSUM_ERROR BIT(7)
+#define RDES1_PTP_MSG_TYPE_MASK GENMASK(11, 8)
+#define RDES1_PTP_PACKET_TYPE BIT(12)
+#define RDES1_PTP_VER BIT(13)
+#define RDES1_TIMESTAMP_AVAILABLE BIT(14)
+#define RDES1_TIMESTAMP_AVAILABLE_SHIFT 14
+#define RDES1_TIMESTAMP_DROPPED BIT(15)
+#define RDES1_IP_TYPE1_CSUM_MASK GENMASK(31, 16)
+
+/* RDES2 (write back format) */
+#define RDES2_L3_L4_HEADER_SIZE_MASK GENMASK(9, 0)
+#define RDES2_VLAN_FILTER_STATUS BIT(15)
+#define RDES2_SA_FILTER_FAIL BIT(16)
+#define RDES2_DA_FILTER_FAIL BIT(17)
+#define RDES2_HASH_FILTER_STATUS BIT(18)
+#define RDES2_MAC_ADDR_MATCH_MASK GENMASK(26, 19)
+#define RDES2_HASH_VALUE_MATCH_MASK GENMASK(26, 19)
+#define RDES2_L3_FILTER_MATCH BIT(27)
+#define RDES2_L4_FILTER_MATCH BIT(28)
+#define RDES2_L3_L4_FILT_NB_MATCH_MASK GENMASK(27, 26)
+#define RDES2_L3_L4_FILT_NB_MATCH_SHIFT 26
+#define RDES2_HL GENMASK(9, 0)
+
+/* RDES3 (write back format) */
+#define RDES3_PACKET_SIZE_MASK GENMASK(14, 0)
+#define RDES3_ERROR_SUMMARY BIT(15)
+#define RDES3_PACKET_LEN_TYPE_MASK GENMASK(18, 16)
+#define RDES3_DRIBBLE_ERROR BIT(19)
+#define RDES3_RECEIVE_ERROR BIT(20)
+#define RDES3_OVERFLOW_ERROR BIT(21)
+#define RDES3_RECEIVE_WATCHDOG BIT(22)
+#define RDES3_GIANT_PACKET BIT(23)
+#define RDES3_CRC_ERROR BIT(24)
+#define RDES3_RDES0_VALID BIT(25)
+#define RDES3_RDES1_VALID BIT(26)
+#define RDES3_RDES2_VALID BIT(27)
+#define RDES3_LAST_DESCRIPTOR BIT(28)
+#define RDES3_FIRST_DESCRIPTOR BIT(29)
+#define RDES3_CONTEXT_DESCRIPTOR BIT(30)
+#define RDES3_CONTEXT_DESCRIPTOR_SHIFT 30
+
+/* RDES3 (read format) */
+#define RDES3_BUFFER1_VALID_ADDR BIT(24)
+#define RDES3_BUFFER2_VALID_ADDR BIT(25)
+#define RDES3_INT_ON_COMPLETION_EN BIT(30)
+
+/* RDES3 used by both formats (read and write back) */
+#define RDES3_OWN BIT(31)
+
+#endif /* __DWMAC4_DESCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
new file mode 100644
index 000000000..d99fa028c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -0,0 +1,577 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * This is the driver for the GMAC on-chip Ethernet controller for ST SoCs.
+ * DWC Ether MAC version 4.xx has been used for developing this code.
+ *
+ * This contains the functions to handle the dma.
+ *
+ * Copyright (C) 2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include "dwmac4.h"
+#include "dwmac4_dma.h"
+
+static void dwmac4_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+ int i;
+
+ pr_info("dwmac4: Master AXI performs %s burst length\n",
+ (value & DMA_SYS_BUS_FB) ? "fixed" : "any");
+
+ if (axi->axi_lpi_en)
+ value |= DMA_AXI_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= DMA_AXI_LPI_XIT_FRM;
+
+ value &= ~DMA_AXI_WR_OSR_LMT;
+ value |= (axi->axi_wr_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_WR_OSR_LMT_SHIFT;
+
+ value &= ~DMA_AXI_RD_OSR_LMT;
+ value |= (axi->axi_rd_osr_lmt & DMA_AXI_OSR_MAX) <<
+ DMA_AXI_RD_OSR_LMT_SHIFT;
+
+ /* Depending on the UNDEF bit, the AXI master performs any burst
+ * length allowed by the programmed BLEN values (by default all
+ * BLEN bits are set).
+ */
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= DMA_AXI_BLEN256;
+ break;
+ case 128:
+ value |= DMA_AXI_BLEN128;
+ break;
+ case 64:
+ value |= DMA_AXI_BLEN64;
+ break;
+ case 32:
+ value |= DMA_AXI_BLEN32;
+ break;
+ case 16:
+ value |= DMA_AXI_BLEN16;
+ break;
+ case 8:
+ value |= DMA_AXI_BLEN8;
+ break;
+ case 4:
+ value |= DMA_AXI_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+}
+
+static void dwmac4_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_rx_phy, u32 chan)
+{
+ u32 value;
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+
+ value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+ value = value | (rxpbl << DMA_BUS_MODE_RPBL_SHIFT);
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
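+ /* With Enhanced Addressing Mode (EAME) the upper 32 bits of the
+ * descriptor base address are programmed in the _HI register.
+ */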
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_rx_phy),
+ ioaddr + DMA_CHAN_RX_BASE_ADDR_HI(chan));
+
+ writel(lower_32_bits(dma_rx_phy), ioaddr + DMA_CHAN_RX_BASE_ADDR(chan));
+}
+
+static void dwmac4_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t dma_tx_phy, u32 chan)
+{
+ u32 value;
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ value = value | (txpbl << DMA_BUS_MODE_PBL_SHIFT);
+
+ /* Enable OSP to get best performance */
+ value |= DMA_CONTROL_OSP;
+
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) && likely(dma_cfg->eame))
+ writel(upper_32_bits(dma_tx_phy),
+ ioaddr + DMA_CHAN_TX_BASE_ADDR_HI(chan));
+
+ writel(lower_32_bits(dma_tx_phy), ioaddr + DMA_CHAN_TX_BASE_ADDR(chan));
+}
+
+static void dwmac4_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value;
+
+ /* common channel control register config */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (dma_cfg->pblx8)
+ value = value | DMA_BUS_MODE_PBL;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK,
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+static void dwmac410_dma_init_channel(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value;
+
+ /* common channel control register config */
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (dma_cfg->pblx8)
+ value = value | DMA_BUS_MODE_PBL;
+
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+
+ /* Mask interrupts by writing to CSR7 */
+ writel(DMA_CHAN_INTR_DEFAULT_MASK_4_10,
+ ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+static void dwmac4_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ u32 value = readl(ioaddr + DMA_SYS_BUS_MODE);
+
+ /* Set the Fixed burst mode */
+ if (dma_cfg->fixed_burst)
+ value |= DMA_SYS_BUS_FB;
+
+ /* Mixed Burst has no effect when fb is set */
+ if (dma_cfg->mixed_burst)
+ value |= DMA_SYS_BUS_MB;
+
+ if (dma_cfg->aal)
+ value |= DMA_SYS_BUS_AAL;
+
+ if (dma_cfg->eame)
+ value |= DMA_SYS_BUS_EAME;
+
+ writel(value, ioaddr + DMA_SYS_BUS_MODE);
+
+ value = readl(ioaddr + DMA_BUS_MODE);
+
+ if (dma_cfg->multi_msi_en) {
+ value &= ~DMA_BUS_MODE_INTM_MASK;
+ value |= (DMA_BUS_MODE_INTM_MODE1 << DMA_BUS_MODE_INTM_SHIFT);
+ }
+
+ if (dma_cfg->dche)
+ value |= DMA_BUS_MODE_DCHE;
+
+ writel(value, ioaddr + DMA_BUS_MODE);
+}
+
+static void _dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 channel,
+ u32 *reg_space)
+{
+ reg_space[DMA_CHAN_CONTROL(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CONTROL(channel));
+ reg_space[DMA_CHAN_TX_CONTROL(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_CONTROL(channel));
+ reg_space[DMA_CHAN_RX_CONTROL(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_CONTROL(channel));
+ reg_space[DMA_CHAN_TX_BASE_ADDR(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_BASE_ADDR(channel));
+ reg_space[DMA_CHAN_RX_BASE_ADDR(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_BASE_ADDR(channel));
+ reg_space[DMA_CHAN_TX_END_ADDR(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_END_ADDR(channel));
+ reg_space[DMA_CHAN_RX_END_ADDR(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_END_ADDR(channel));
+ reg_space[DMA_CHAN_TX_RING_LEN(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_TX_RING_LEN(channel));
+ reg_space[DMA_CHAN_RX_RING_LEN(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_RING_LEN(channel));
+ reg_space[DMA_CHAN_INTR_ENA(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_INTR_ENA(channel));
+ reg_space[DMA_CHAN_RX_WATCHDOG(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_RX_WATCHDOG(channel));
+ reg_space[DMA_CHAN_SLOT_CTRL_STATUS(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_SLOT_CTRL_STATUS(channel));
+ reg_space[DMA_CHAN_CUR_TX_DESC(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_TX_DESC(channel));
+ reg_space[DMA_CHAN_CUR_RX_DESC(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_RX_DESC(channel));
+ reg_space[DMA_CHAN_CUR_TX_BUF_ADDR(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_TX_BUF_ADDR(channel));
+ reg_space[DMA_CHAN_CUR_RX_BUF_ADDR(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_CUR_RX_BUF_ADDR(channel));
+ reg_space[DMA_CHAN_STATUS(channel) / 4] =
+ readl(ioaddr + DMA_CHAN_STATUS(channel));
+}
+
+static void dwmac4_dump_dma_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = 0; i < DMA_CHANNEL_NB_MAX; i++)
+ _dwmac4_dump_dma_regs(ioaddr, i, reg_space);
+}
+
+static void dwmac4_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
+{
+ writel(riwt, ioaddr + DMA_CHAN_RX_WATCHDOG(queue));
+}
+
+static void dwmac4_dma_rx_chan_op_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ unsigned int rqs = fifosz / 256 - 1;
+ u32 mtl_rx_op;
+
+ mtl_rx_op = readl(ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable RX store and forward mode\n");
+ mtl_rx_op |= MTL_OP_MODE_RSF;
+ } else {
+ pr_debug("GMAC: disable RX SF mode (threshold %d)\n", mode);
+ mtl_rx_op &= ~MTL_OP_MODE_RSF;
+ mtl_rx_op &= MTL_OP_MODE_RTC_MASK;
+ if (mode <= 32)
+ mtl_rx_op |= MTL_OP_MODE_RTC_32;
+ else if (mode <= 64)
+ mtl_rx_op |= MTL_OP_MODE_RTC_64;
+ else if (mode <= 96)
+ mtl_rx_op |= MTL_OP_MODE_RTC_96;
+ else
+ mtl_rx_op |= MTL_OP_MODE_RTC_128;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RQS_MASK;
+ mtl_rx_op |= rqs << MTL_OP_MODE_RQS_SHIFT;
+
+ /* Enable flow control only if each channel gets 4 KiB or more FIFO and
+ * only if channel is not an AVB channel.
+ */
+ if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
+ unsigned int rfd, rfa;
+
+ mtl_rx_op |= MTL_OP_MODE_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (fifosz) {
+ case 4096:
+ /* This violates the above formula because of the FIFO size
+ * limit, so an overflow may still occur despite flow control.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ default:
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
+ break;
+ }
+
+ mtl_rx_op &= ~MTL_OP_MODE_RFD_MASK;
+ mtl_rx_op |= rfd << MTL_OP_MODE_RFD_SHIFT;
+
+ mtl_rx_op &= ~MTL_OP_MODE_RFA_MASK;
+ mtl_rx_op |= rfa << MTL_OP_MODE_RFA_SHIFT;
+ }
+
+ writel(mtl_rx_op, ioaddr + MTL_CHAN_RX_OP_MODE(channel));
+}
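+
+/* Worked example (illustrative): RQS is encoded in 256-byte blocks, so
+ * a 4 KiB per-channel FIFO gives rqs = 4096 / 256 - 1 = 15 and an 8 KiB
+ * FIFO gives rqs = 31. The annotated rfa/rfd codes above (0x01 ->
+ * "Full-1.5K", 0x03 -> "Full-2.5K", 0x04 -> "Full-3K", 0x07 ->
+ * "Full-4.5K") are consistent with a 0.5 KiB step per code starting
+ * from "full - 1 KiB" at code 0.
+ */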
+
+static void dwmac4_dma_tx_chan_op_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ pr_debug("GMAC: enable TX store and forward mode\n");
+ /* Transmit COE type 2 cannot be done in cut-through mode. */
+ mtl_tx_op |= MTL_OP_MODE_TSF;
+ } else {
+ pr_debug("GMAC: disabling TX SF (threshold %d)\n", mode);
+ mtl_tx_op &= ~MTL_OP_MODE_TSF;
+ mtl_tx_op &= MTL_OP_MODE_TTC_MASK;
+ /* Set the transmit threshold */
+ if (mode <= 32)
+ mtl_tx_op |= MTL_OP_MODE_TTC_32;
+ else if (mode <= 64)
+ mtl_tx_op |= MTL_OP_MODE_TTC_64;
+ else if (mode <= 96)
+ mtl_tx_op |= MTL_OP_MODE_TTC_96;
+ else if (mode <= 128)
+ mtl_tx_op |= MTL_OP_MODE_TTC_128;
+ else if (mode <= 192)
+ mtl_tx_op |= MTL_OP_MODE_TTC_192;
+ else if (mode <= 256)
+ mtl_tx_op |= MTL_OP_MODE_TTC_256;
+ else if (mode <= 384)
+ mtl_tx_op |= MTL_OP_MODE_TTC_384;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TTC_512;
+ }
+ /* For an IP with DWC_EQOS_NUM_TXQ == 1, the fields TXQEN and TQS are RO
+ * with reset values: TXQEN on, TQS == DWC_EQOS_TXFIFO_SIZE.
+ * For an IP with DWC_EQOS_NUM_TXQ > 1, the fields TXQEN and TQS are R/W
+ * with reset values: TXQEN off, TQS 256 bytes.
+ *
+ * TXQEN must be written for multi-channel operation and TQS must
+ * reflect the available fifo size per queue (total fifo size / number
+ * of enabled queues).
+ */
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+ mtl_tx_op &= ~MTL_OP_MODE_TQS_MASK;
+ mtl_tx_op |= tqs << MTL_OP_MODE_TQS_SHIFT;
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+}
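+
+/* Worked example (illustrative): like RQS, TQS is encoded in 256-byte
+ * blocks. Per the comment above, fifosz is the per-queue share, so an
+ * 8192-byte TX FIFO split across 4 enabled queues gives
+ * fifosz = 8192 / 4 = 2048 and tqs = 2048 / 256 - 1 = 7.
+ */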
+
+static int dwmac4_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap = readl(ioaddr + GMAC_HW_FEATURE0);
+
+ /* MAC HW feature0 */
+ dma_cap->mbps_10_100 = (hw_cap & GMAC_HW_FEAT_MIISEL);
+ dma_cap->mbps_1000 = (hw_cap & GMAC_HW_FEAT_GMIISEL) >> 1;
+ dma_cap->half_duplex = (hw_cap & GMAC_HW_FEAT_HDSEL) >> 2;
+ dma_cap->vlhash = (hw_cap & GMAC_HW_FEAT_VLHASH) >> 4;
+ dma_cap->multi_addr = (hw_cap & GMAC_HW_FEAT_ADDMAC) >> 18;
+ dma_cap->pcs = (hw_cap & GMAC_HW_FEAT_PCSSEL) >> 3;
+ dma_cap->sma_mdio = (hw_cap & GMAC_HW_FEAT_SMASEL) >> 5;
+ dma_cap->pmt_remote_wake_up = (hw_cap & GMAC_HW_FEAT_RWKSEL) >> 6;
+ dma_cap->pmt_magic_frame = (hw_cap & GMAC_HW_FEAT_MGKSEL) >> 7;
+ /* MMC */
+ dma_cap->rmon = (hw_cap & GMAC_HW_FEAT_MMCSEL) >> 8;
+ /* IEEE 1588-2008 */
+ dma_cap->atime_stamp = (hw_cap & GMAC_HW_FEAT_TSSEL) >> 12;
+ /* 802.3az - Energy-Efficient Ethernet (EEE) */
+ dma_cap->eee = (hw_cap & GMAC_HW_FEAT_EEESEL) >> 13;
+ /* TX and RX csum */
+ dma_cap->tx_coe = (hw_cap & GMAC_HW_FEAT_TXCOSEL) >> 14;
+ dma_cap->rx_coe = (hw_cap & GMAC_HW_FEAT_RXCOESEL) >> 16;
+ dma_cap->vlins = (hw_cap & GMAC_HW_FEAT_SAVLANINS) >> 27;
+ dma_cap->arpoffsel = (hw_cap & GMAC_HW_FEAT_ARPOFFSEL) >> 9;
+
+ /* MAC HW feature1 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE1);
+ dma_cap->l3l4fnum = (hw_cap & GMAC_HW_FEAT_L3L4FNUM) >> 27;
+ dma_cap->hash_tb_sz = (hw_cap & GMAC_HW_HASH_TB_SZ) >> 24;
+ dma_cap->av = (hw_cap & GMAC_HW_FEAT_AVSEL) >> 20;
+ dma_cap->tsoen = (hw_cap & GMAC_HW_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & GMAC_HW_FEAT_SPHEN) >> 17;
+
+ dma_cap->addr64 = (hw_cap & GMAC_HW_ADDR64) >> 14;
+ switch (dma_cap->addr64) {
+ case 0:
+ dma_cap->addr64 = 32;
+ break;
+ case 1:
+ dma_cap->addr64 = 40;
+ break;
+ case 2:
+ dma_cap->addr64 = 48;
+ break;
+ default:
+ dma_cap->addr64 = 32;
+ break;
+ }
+
+ /* RX and TX FIFO sizes are encoded as log2(n / 128). Undo that by
+ * shifting and store the sizes in bytes.
+ */
+ dma_cap->tx_fifo_size = 128 << ((hw_cap & GMAC_HW_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size = 128 << ((hw_cap & GMAC_HW_RXFIFOSIZE) >> 0);
+ /* MAC HW feature2 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE2);
+ /* TX and RX number of channels */
+ dma_cap->number_rx_channel =
+ ((hw_cap & GMAC_HW_FEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_channel =
+ ((hw_cap & GMAC_HW_FEAT_TXCHCNT) >> 18) + 1;
+ /* TX and RX number of queues */
+ dma_cap->number_rx_queues =
+ ((hw_cap & GMAC_HW_FEAT_RXQCNT) >> 0) + 1;
+ dma_cap->number_tx_queues =
+ ((hw_cap & GMAC_HW_FEAT_TXQCNT) >> 6) + 1;
+ /* PPS output */
+ dma_cap->pps_out_num = (hw_cap & GMAC_HW_FEAT_PPSOUTNUM) >> 24;
+
+ /* IEEE 1588-2002 */
+ dma_cap->time_stamp = 0;
+ /* Number of Auxiliary Snapshot Inputs */
+ dma_cap->aux_snapshot_n = (hw_cap & GMAC_HW_FEAT_AUXSNAPNUM) >> 28;
+
+ /* MAC HW feature3 */
+ hw_cap = readl(ioaddr + GMAC_HW_FEATURE3);
+
+ /* 5.10 Features */
+ dma_cap->asp = (hw_cap & GMAC_HW_FEAT_ASP) >> 28;
+ dma_cap->tbssel = (hw_cap & GMAC_HW_FEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & GMAC_HW_FEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & GMAC_HW_FEAT_ESTWID) >> 20;
+ dma_cap->estdep = (hw_cap & GMAC_HW_FEAT_ESTDEP) >> 17;
+ dma_cap->estsel = (hw_cap & GMAC_HW_FEAT_ESTSEL) >> 16;
+ dma_cap->frpes = (hw_cap & GMAC_HW_FEAT_FRPES) >> 13;
+ dma_cap->frpbs = (hw_cap & GMAC_HW_FEAT_FRPBS) >> 11;
+ dma_cap->frpsel = (hw_cap & GMAC_HW_FEAT_FRPSEL) >> 10;
+ dma_cap->dvlan = (hw_cap & GMAC_HW_FEAT_DVLAN) >> 5;
+
+ return 0;
+}
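+
+/* Worked example (illustrative): the FIFO size fields hold
+ * log2(size / 128), so a raw field value of 7 decodes as
+ * 128 << 7 = 16384 bytes (16 KiB), and an ADDR64 field of 0x1
+ * decodes to a 40-bit DMA address width.
+ */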
+
+/* Enable/disable the TSO feature */
+static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value;
+
+ if (en) {
+ /* enable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value | DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ } else {
+ /* disable TSO */
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ writel(value & ~DMA_CONTROL_TSE,
+ ioaddr + DMA_CHAN_TX_CONTROL(chan));
+ }
+}
+
+static void dwmac4_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 mtl_tx_op = readl(ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+
+ mtl_tx_op &= ~MTL_OP_MODE_TXQEN_MASK;
+ if (qmode != MTL_QUEUE_AVB)
+ mtl_tx_op |= MTL_OP_MODE_TXQEN;
+ else
+ mtl_tx_op |= MTL_OP_MODE_TXQEN_AV;
+
+ writel(mtl_tx_op, ioaddr + MTL_CHAN_TX_OP_MODE(channel));
+}
+
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ value &= ~DMA_RBSZ_MASK;
+ value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
+static void dwmac4_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + GMAC_EXT_CONFIG);
+
+ value &= ~GMAC_CONFIG_HDSMS;
+ value |= GMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + GMAC_EXT_CONFIG);
+
+ value = readl(ioaddr + DMA_CHAN_CONTROL(chan));
+ if (en)
+ value |= DMA_CONTROL_SPH;
+ else
+ value &= ~DMA_CONTROL_SPH;
+ writel(value, ioaddr + DMA_CHAN_CONTROL(chan));
+}
+
+static int dwmac4_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ if (en)
+ value |= DMA_CONTROL_EDSE;
+ else
+ value &= ~DMA_CONTROL_EDSE;
+
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan)) & DMA_CONTROL_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(DMA_TBS_DEF_FTOS, ioaddr + DMA_TBS_CTRL);
+ return 0;
+}
+
+const struct stmmac_dma_ops dwmac4_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .init_chan = dwmac4_dma_init_channel,
+ .init_rx_chan = dwmac4_dma_init_rx_chan,
+ .init_tx_chan = dwmac4_dma_init_tx_chan,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
+ .enable_dma_irq = dwmac4_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
+ .set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
+};
+
+const struct stmmac_dma_ops dwmac410_dma_ops = {
+ .reset = dwmac4_dma_reset,
+ .init = dwmac4_dma_init,
+ .init_chan = dwmac410_dma_init_channel,
+ .init_rx_chan = dwmac4_dma_init_rx_chan,
+ .init_tx_chan = dwmac4_dma_init_tx_chan,
+ .axi = dwmac4_dma_axi,
+ .dump_regs = dwmac4_dump_dma_regs,
+ .dma_rx_mode = dwmac4_dma_rx_chan_op_mode,
+ .dma_tx_mode = dwmac4_dma_tx_chan_op_mode,
+ .enable_dma_irq = dwmac410_enable_dma_irq,
+ .disable_dma_irq = dwmac4_disable_dma_irq,
+ .start_tx = dwmac4_dma_start_tx,
+ .stop_tx = dwmac4_dma_stop_tx,
+ .start_rx = dwmac4_dma_start_rx,
+ .stop_rx = dwmac4_dma_stop_rx,
+ .dma_interrupt = dwmac4_dma_interrupt,
+ .get_hw_feature = dwmac4_get_hw_feature,
+ .rx_watchdog = dwmac4_rx_watchdog,
+ .set_rx_ring_len = dwmac4_set_rx_ring_len,
+ .set_tx_ring_len = dwmac4_set_tx_ring_len,
+ .set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
+ .enable_tso = dwmac4_enable_tso,
+ .qmode = dwmac4_qmode,
+ .set_bfsize = dwmac4_set_bfsize,
+ .enable_sph = dwmac4_enable_sph,
+ .enable_tbs = dwmac4_enable_tbs,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
new file mode 100644
index 000000000..9321879b5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -0,0 +1,238 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DWMAC4 DMA Header file.
+ *
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#ifndef __DWMAC4_DMA_H__
+#define __DWMAC4_DMA_H__
+
+/* Define the max channel number used for TX (and also RX).
+ * dwmac4 accepts up to 8 channels for TX (and also 8 channels for RX).
+ */
+#define DMA_CHANNEL_NB_MAX 1
+
+#define DMA_BUS_MODE 0x00001000
+#define DMA_SYS_BUS_MODE 0x00001004
+#define DMA_STATUS 0x00001008
+#define DMA_DEBUG_STATUS_0 0x0000100c
+#define DMA_DEBUG_STATUS_1 0x00001010
+#define DMA_DEBUG_STATUS_2 0x00001014
+#define DMA_AXI_BUS_MODE 0x00001028
+#define DMA_TBS_CTRL 0x00001050
+
+/* DMA Bus Mode bitmap */
+#define DMA_BUS_MODE_DCHE BIT(19)
+#define DMA_BUS_MODE_INTM_MASK GENMASK(17, 16)
+#define DMA_BUS_MODE_INTM_SHIFT 16
+#define DMA_BUS_MODE_INTM_MODE1 0x1
+#define DMA_BUS_MODE_SFT_RESET BIT(0)
+
+/* DMA SYS Bus Mode bitmap */
+#define DMA_BUS_MODE_SPH BIT(24)
+#define DMA_BUS_MODE_PBL BIT(16)
+#define DMA_BUS_MODE_PBL_SHIFT 16
+#define DMA_BUS_MODE_RPBL_SHIFT 16
+#define DMA_BUS_MODE_MB BIT(14)
+#define DMA_BUS_MODE_FB BIT(0)
+
+/* DMA Interrupt top status */
+#define DMA_STATUS_MAC BIT(17)
+#define DMA_STATUS_MTL BIT(16)
+#define DMA_STATUS_CHAN7 BIT(7)
+#define DMA_STATUS_CHAN6 BIT(6)
+#define DMA_STATUS_CHAN5 BIT(5)
+#define DMA_STATUS_CHAN4 BIT(4)
+#define DMA_STATUS_CHAN3 BIT(3)
+#define DMA_STATUS_CHAN2 BIT(2)
+#define DMA_STATUS_CHAN1 BIT(1)
+#define DMA_STATUS_CHAN0 BIT(0)
+
+/* DMA debug status bitmap */
+#define DMA_DEBUG_STATUS_TS_MASK 0xf
+#define DMA_DEBUG_STATUS_RS_MASK 0xf
+
+/* DMA AXI bitmap */
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(27, 24)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 24
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+
+#define DMA_SYS_BUS_MB BIT(14)
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_SYS_BUS_AAL BIT(12)
+#define DMA_SYS_BUS_EAME BIT(11)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_SYS_BUS_FB BIT(0)
+
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
+/* DMA TBS Control */
+#define DMA_TBS_FTOS GENMASK(31, 8)
+#define DMA_TBS_FTOV BIT(0)
+#define DMA_TBS_DEF_FTOS (DMA_TBS_FTOS | DMA_TBS_FTOV)
+
+/* The following DMA defines are channel-oriented */
+#define DMA_CHAN_BASE_ADDR 0x00001100
+#define DMA_CHAN_BASE_OFFSET 0x80
+#define DMA_CHANX_BASE_ADDR(x) (DMA_CHAN_BASE_ADDR + \
+ (x * DMA_CHAN_BASE_OFFSET))
+#define DMA_CHAN_REG_NUMBER 17
+
+#define DMA_CHAN_CONTROL(x) DMA_CHANX_BASE_ADDR(x)
+#define DMA_CHAN_TX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x4)
+#define DMA_CHAN_RX_CONTROL(x) (DMA_CHANX_BASE_ADDR(x) + 0x8)
+#define DMA_CHAN_TX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x10)
+#define DMA_CHAN_TX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x14)
+#define DMA_CHAN_RX_BASE_ADDR_HI(x) (DMA_CHANX_BASE_ADDR(x) + 0x18)
+#define DMA_CHAN_RX_BASE_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x1c)
+#define DMA_CHAN_TX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x20)
+#define DMA_CHAN_RX_END_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x28)
+#define DMA_CHAN_TX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x2c)
+#define DMA_CHAN_RX_RING_LEN(x) (DMA_CHANX_BASE_ADDR(x) + 0x30)
+#define DMA_CHAN_INTR_ENA(x) (DMA_CHANX_BASE_ADDR(x) + 0x34)
+#define DMA_CHAN_RX_WATCHDOG(x) (DMA_CHANX_BASE_ADDR(x) + 0x38)
+#define DMA_CHAN_SLOT_CTRL_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x3c)
+#define DMA_CHAN_CUR_TX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x44)
+#define DMA_CHAN_CUR_RX_DESC(x) (DMA_CHANX_BASE_ADDR(x) + 0x4c)
+#define DMA_CHAN_CUR_TX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x54)
+#define DMA_CHAN_CUR_RX_BUF_ADDR(x) (DMA_CHANX_BASE_ADDR(x) + 0x5c)
+#define DMA_CHAN_STATUS(x) (DMA_CHANX_BASE_ADDR(x) + 0x60)
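+
+/* Worked example (illustrative): each channel occupies an 0x80-byte
+ * register window starting at 0x1100, so e.g.
+ * DMA_CHAN_TX_CONTROL(2) = 0x1100 + 2 * 0x80 + 0x4 = 0x1204.
+ */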
+
+/* DMA Control X */
+#define DMA_CONTROL_SPH BIT(24)
+#define DMA_CONTROL_MSS_MASK GENMASK(13, 0)
+
+/* DMA Tx Channel X Control register defines */
+#define DMA_CONTROL_EDSE BIT(28)
+#define DMA_CONTROL_TSE BIT(12)
+#define DMA_CONTROL_OSP BIT(4)
+#define DMA_CONTROL_ST BIT(0)
+
+/* DMA Rx Channel X Control register defines */
+#define DMA_CONTROL_SR BIT(0)
+#define DMA_RBSZ_MASK GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT 1
+
+/* Interrupt status per channel */
+#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
+#define DMA_CHAN_STATUS_REB_SHIFT 19
+#define DMA_CHAN_STATUS_TEB GENMASK(18, 16)
+#define DMA_CHAN_STATUS_TEB_SHIFT 16
+#define DMA_CHAN_STATUS_NIS BIT(15)
+#define DMA_CHAN_STATUS_AIS BIT(14)
+#define DMA_CHAN_STATUS_CDE BIT(13)
+#define DMA_CHAN_STATUS_FBE BIT(12)
+#define DMA_CHAN_STATUS_ERI BIT(11)
+#define DMA_CHAN_STATUS_ETI BIT(10)
+#define DMA_CHAN_STATUS_RWT BIT(9)
+#define DMA_CHAN_STATUS_RPS BIT(8)
+#define DMA_CHAN_STATUS_RBU BIT(7)
+#define DMA_CHAN_STATUS_RI BIT(6)
+#define DMA_CHAN_STATUS_TBU BIT(2)
+#define DMA_CHAN_STATUS_TPS BIT(1)
+#define DMA_CHAN_STATUS_TI BIT(0)
+
+#define DMA_CHAN_STATUS_MSK_COMMON (DMA_CHAN_STATUS_NIS | \
+ DMA_CHAN_STATUS_AIS | \
+ DMA_CHAN_STATUS_CDE | \
+ DMA_CHAN_STATUS_FBE)
+
+#define DMA_CHAN_STATUS_MSK_RX (DMA_CHAN_STATUS_REB | \
+ DMA_CHAN_STATUS_ERI | \
+ DMA_CHAN_STATUS_RWT | \
+ DMA_CHAN_STATUS_RPS | \
+ DMA_CHAN_STATUS_RBU | \
+ DMA_CHAN_STATUS_RI | \
+ DMA_CHAN_STATUS_MSK_COMMON)
+
+#define DMA_CHAN_STATUS_MSK_TX (DMA_CHAN_STATUS_ETI | \
+ DMA_CHAN_STATUS_TBU | \
+ DMA_CHAN_STATUS_TPS | \
+ DMA_CHAN_STATUS_TI | \
+ DMA_CHAN_STATUS_MSK_COMMON)
+
+/* Interrupt enable bits per channel */
+#define DMA_CHAN_INTR_ENA_NIE BIT(16)
+#define DMA_CHAN_INTR_ENA_AIE BIT(15)
+#define DMA_CHAN_INTR_ENA_NIE_4_10 BIT(15)
+#define DMA_CHAN_INTR_ENA_AIE_4_10 BIT(14)
+#define DMA_CHAN_INTR_ENA_CDE BIT(13)
+#define DMA_CHAN_INTR_ENA_FBE BIT(12)
+#define DMA_CHAN_INTR_ENA_ERE BIT(11)
+#define DMA_CHAN_INTR_ENA_ETE BIT(10)
+#define DMA_CHAN_INTR_ENA_RWE BIT(9)
+#define DMA_CHAN_INTR_ENA_RSE BIT(8)
+#define DMA_CHAN_INTR_ENA_RBUE BIT(7)
+#define DMA_CHAN_INTR_ENA_RIE BIT(6)
+#define DMA_CHAN_INTR_ENA_TBUE BIT(2)
+#define DMA_CHAN_INTR_ENA_TSE BIT(1)
+#define DMA_CHAN_INTR_ENA_TIE BIT(0)
+
+#define DMA_CHAN_INTR_NORMAL (DMA_CHAN_INTR_ENA_NIE | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL (DMA_CHAN_INTR_ENA_AIE | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.00 */
+#define DMA_CHAN_INTR_DEFAULT_MASK (DMA_CHAN_INTR_NORMAL | \
+ DMA_CHAN_INTR_ABNORMAL)
+#define DMA_CHAN_INTR_DEFAULT_RX (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX (DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_NORMAL_4_10 (DMA_CHAN_INTR_ENA_NIE_4_10 | \
+ DMA_CHAN_INTR_ENA_RIE | \
+ DMA_CHAN_INTR_ENA_TIE)
+
+#define DMA_CHAN_INTR_ABNORMAL_4_10 (DMA_CHAN_INTR_ENA_AIE_4_10 | \
+ DMA_CHAN_INTR_ENA_FBE)
+/* DMA default interrupt mask for 4.10a */
+#define DMA_CHAN_INTR_DEFAULT_MASK_4_10 (DMA_CHAN_INTR_NORMAL_4_10 | \
+ DMA_CHAN_INTR_ABNORMAL_4_10)
+#define DMA_CHAN_INTR_DEFAULT_RX_4_10 (DMA_CHAN_INTR_ENA_RIE)
+#define DMA_CHAN_INTR_DEFAULT_TX_4_10 (DMA_CHAN_INTR_ENA_TIE)
+
+/* channel 0 specific fields */
+#define DMA_CHAN0_DBG_STAT_TPS GENMASK(15, 12)
+#define DMA_CHAN0_DBG_STAT_TPS_SHIFT 12
+#define DMA_CHAN0_DBG_STAT_RPS GENMASK(11, 8)
+#define DMA_CHAN0_DBG_STAT_RPS_SHIFT 8
+
+int dwmac4_dma_reset(void __iomem *ioaddr);
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir);
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan);
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+
+#endif /* __DWMAC4_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
new file mode 100644
index 000000000..7c26394f6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2015 STMicroelectronics Ltd
+ *
+ * Author: Alexandre Torgue <alexandre.torgue@st.com>
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include "common.h"
+#include "dwmac4_dma.h"
+#include "dwmac4.h"
+
+int dwmac4_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 1000000);
+}
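+
+/* Note (illustrative): readl_poll_timeout() above re-reads DMA_BUS_MODE
+ * every 10 ms (10000 us) until the self-clearing SFT_RESET bit drops,
+ * returning -ETIMEDOUT if it is still set after 1 s (1000000 us).
+ */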
+
+void dwmac4_set_rx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_RX_END_ADDR(chan));
+}
+
+void dwmac4_set_tx_tail_ptr(void __iomem *ioaddr, u32 tail_ptr, u32 chan)
+{
+ writel(tail_ptr, ioaddr + DMA_CHAN_TX_END_ADDR(chan));
+}
+
+void dwmac4_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_TE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_TX_CONTROL(chan));
+
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CHAN_TX_CONTROL(chan));
+}
+
+void dwmac4_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ value |= DMA_CONTROL_SR;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ value = readl(ioaddr + GMAC_CONFIG);
+ value |= GMAC_CONFIG_RE;
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void dwmac4_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
+void dwmac4_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + DMA_CHAN_TX_RING_LEN(chan));
+}
+
+void dwmac4_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + DMA_CHAN_RX_RING_LEN(chan));
+}
+
+void dwmac4_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value |= DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value |= DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac4_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+void dwmac410_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+
+ if (rx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_RX_4_10;
+ if (tx)
+ value &= ~DMA_CHAN_INTR_DEFAULT_TX_4_10;
+
+ writel(value, ioaddr + DMA_CHAN_INTR_ENA(chan));
+}
+
+int dwmac4_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir)
+{
+ u32 intr_status = readl(ioaddr + DMA_CHAN_STATUS(chan));
+ u32 intr_en = readl(ioaddr + DMA_CHAN_INTR_ENA(chan));
+ int ret = 0;
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_CHAN_STATUS_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_CHAN_STATUS_MSK_TX;
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_CHAN_STATUS_AIS)) {
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RBU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_CHAN_STATUS_NIS))
+ x->normal_irq_n++;
+ if (likely(intr_status & DMA_CHAN_STATUS_RI)) {
+ x->rx_normal_irq_n++;
+ x->rxq_stats[chan].rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & DMA_CHAN_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ x->txq_stats[chan].tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_CHAN_STATUS_TBU))
+ ret |= handle_tx;
+ if (unlikely(intr_status & DMA_CHAN_STATUS_ERI))
+ x->rx_early_irq++;
+
+ writel(intr_status & intr_en, ioaddr + DMA_CHAN_STATUS(chan));
+ return ret;
+}
+
+void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ /* For MAC Addr registers we have to set the Address Enable (AE)
+ * bit that has no effect on the High Reg 0 where the bit 31 (MO)
+ * is RO.
+ */
+ data |= (STMMAC_CHAN0 << GMAC_HI_DCS_SHIFT);
+ writel(data | GMAC_HI_REG_AE, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
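+
+/* Worked example (illustrative): for address 00:11:22:33:44:55 the
+ * packing above yields
+ * low = 0x33 << 24 | 0x22 << 16 | 0x11 << 8 | 0x00 = 0x33221100
+ * high = 0x55 << 8 | 0x44 = 0x5544
+ * before the AE and DCS control bits are OR-ed into the high word.
+ */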
+
+/* Enable/disable MAC RX/TX */
+void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + GMAC_CONFIG);
+ u32 old_val = value;
+
+ if (enable)
+ value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE;
+ else
+ value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE);
+
+ if (value != old_val)
+ writel(value, ioaddr + GMAC_CONFIG);
+}
+
+void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.c b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
new file mode 100644
index 000000000..8fd167501
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.c
@@ -0,0 +1,777 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
+// stmmac Support for 5.xx Ethernet QoS cores
+
+#include <linux/bitops.h>
+#include <linux/iopoll.h>
+#include "common.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+struct dwmac5_error_desc {
+ bool valid;
+ const char *desc;
+ const char *detailed_desc;
+};
+
+#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
+
+static void dwmac5_log_error(struct net_device *ndev, u32 value, bool corr,
+ const char *module_name, const struct dwmac5_error_desc *desc,
+ unsigned long field_offset, struct stmmac_safety_stats *stats)
+{
+ unsigned long loc, mask;
+ u8 *bptr = (u8 *)stats;
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)(bptr + field_offset);
+
+ mask = value;
+ for_each_set_bit(loc, &mask, 32) {
+ netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
+ "correctable" : "uncorrectable", module_name,
+ desc[loc].desc, desc[loc].detailed_desc);
+
+ /* Update counters */
+ ptr[loc]++;
+ }
+}
+
+static const struct dwmac5_error_desc dwmac5_mac_errors[32] = {
+ { true, "ATPES", "Application Transmit Interface Parity Check Error" },
+ { true, "TPES", "TSO Data Path Parity Check Error" },
+ { true, "RDPES", "Read Descriptor Parity Check Error" },
+ { true, "MPES", "MTL Data Path Parity Check Error" },
+ { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
+ { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
+ { true, "CWPES", "CSR Write Data Path Parity Check Error" },
+ { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
+ { true, "TTES", "TX FSM Timeout Error" },
+ { true, "RTES", "RX FSM Timeout Error" },
+ { true, "CTES", "CSR FSM Timeout Error" },
+ { true, "ATES", "APP FSM Timeout Error" },
+ { true, "PTES", "PTP FSM Timeout Error" },
+ { true, "T125ES", "TX125 FSM Timeout Error" },
+ { true, "R125ES", "RX125 FSM Timeout Error" },
+ { true, "RVCTES", "REV MDC FSM Timeout Error" },
+ { true, "MSTTES", "Master Read/Write Timeout Error" },
+ { true, "SLVTES", "Slave Read/Write Timeout Error" },
+ { true, "ATITES", "Application Timeout on ATI Interface Error" },
+ { true, "ARITES", "Application Timeout on ARI Interface Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { true, "FSMPES", "FSM State Parity Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwmac5_handle_mac_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + MAC_DPP_FSM_INT_STATUS);
+ writel(value, ioaddr + MAC_DPP_FSM_INT_STATUS);
+
+ dwmac5_log_error(ndev, value, correctable, "MAC", dwmac5_mac_errors,
+ STAT_OFF(mac_errors), stats);
+}
+
+static const struct dwmac5_error_desc dwmac5_mtl_errors[32] = {
+ { true, "TXCES", "MTL TX Memory Error" },
+ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
+ { true, "TXUES", "MTL TX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "RXCES", "MTL RX Memory Error" },
+ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
+ { true, "RXUES", "MTL RX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { true, "ECES", "MTL EST Memory Error" },
+ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
+ { true, "EUES", "MTL EST Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { true, "RPCES", "MTL RX Parser Memory Error" },
+ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
+ { true, "RPUES", "MTL RX Parser Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwmac5_handle_mtl_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + MTL_ECC_INT_STATUS);
+ writel(value, ioaddr + MTL_ECC_INT_STATUS);
+
+ dwmac5_log_error(ndev, value, correctable, "MTL", dwmac5_mtl_errors,
+ STAT_OFF(mtl_errors), stats);
+}
+
+static const struct dwmac5_error_desc dwmac5_dma_errors[32] = {
+ { true, "TCES", "DMA TSO Memory Error" },
+ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
+ { true, "TUES", "DMA TSO Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 4 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 5 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 6 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwmac5_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + DMA_ECC_INT_STATUS);
+ writel(value, ioaddr + DMA_ECC_INT_STATUS);
+
+ dwmac5_log_error(ndev, value, correctable, "DMA", dwmac5_dma_errors,
+ STAT_OFF(dma_errors), stats);
+}
+
+int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_feat_cfg)
+{
+ struct stmmac_safety_feature_cfg all_safety_feats = {
+ .tsoee = 1,
+ .mrxpee = 1,
+ .mestee = 1,
+ .mrxee = 1,
+ .mtxee = 1,
+ .epsi = 1,
+ .edpp = 1,
+ .prtyen = 1,
+ .tmouten = 1,
+ };
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
+ if (!safety_feat_cfg)
+ safety_feat_cfg = &all_safety_feats;
+
+ /* 1. Enable Safety Features */
+ value = readl(ioaddr + MTL_ECC_CONTROL);
+ value |= MEEAO; /* MTL ECC Error Addr Status Override */
+ if (safety_feat_cfg->tsoee)
+ value |= TSOEE; /* TSO ECC */
+ if (safety_feat_cfg->mrxpee)
+ value |= MRXPEE; /* MTL RX Parser ECC */
+ if (safety_feat_cfg->mestee)
+ value |= MESTEE; /* MTL EST ECC */
+ if (safety_feat_cfg->mrxee)
+ value |= MRXEE; /* MTL RX FIFO ECC */
+ if (safety_feat_cfg->mtxee)
+ value |= MTXEE; /* MTL TX FIFO ECC */
+ writel(value, ioaddr + MTL_ECC_CONTROL);
+
+ /* 2. Enable MTL Safety Interrupts */
+ value = readl(ioaddr + MTL_ECC_INT_ENABLE);
+ value |= RPCEIE; /* RX Parser Memory Correctable Error */
+ value |= ECEIE; /* EST Memory Correctable Error */
+ value |= RXCEIE; /* RX Memory Correctable Error */
+ value |= TXCEIE; /* TX Memory Correctable Error */
+ writel(value, ioaddr + MTL_ECC_INT_ENABLE);
+
+ /* 3. Enable DMA Safety Interrupts */
+ value = readl(ioaddr + DMA_ECC_INT_ENABLE);
+ value |= TCEIE; /* TSO Memory Correctable Error */
+ writel(value, ioaddr + DMA_ECC_INT_ENABLE);
+
+ /* Only the ECC Protection for External Memory feature is selected */
+ if (asp <= 0x1)
+ return 0;
+
+ /* 4. Enable Parity and Timeout for FSM */
+ value = readl(ioaddr + MAC_FSM_CONTROL);
+ if (safety_feat_cfg->prtyen)
+ value |= PRTYEN; /* FSM Parity Feature */
+ if (safety_feat_cfg->tmouten)
+ value |= TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + MAC_FSM_CONTROL);
+
+ /* 5. Enable Data Parity Protection */
+ value = readl(ioaddr + MTL_DPP_CONTROL);
+ if (safety_feat_cfg->edpp)
+ value |= EDPP;
+ writel(value, ioaddr + MTL_DPP_CONTROL);
+
+ /*
+ * All the Automotive Safety features are selected without the "Parity
+ * Port Enable for external interface" feature.
+ */
+ if (asp <= 0x2)
+ return 0;
+
+ if (safety_feat_cfg->epsi)
+ value |= EPSI;
+ writel(value, ioaddr + MTL_DPP_CONTROL);
+ return 0;
+}
+
+int dwmac5_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_stats *stats)
+{
+ bool err, corr;
+ u32 mtl, dma;
+ int ret = 0;
+
+ if (!asp)
+ return -EINVAL;
+
+ mtl = readl(ioaddr + MTL_SAFETY_INT_STATUS);
+ dma = readl(ioaddr + DMA_SAFETY_INT_STATUS);
+
+ err = (mtl & MCSIS) || (dma & MCSIS);
+ corr = false;
+ if (err) {
+ dwmac5_handle_mac_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = (mtl & (MEUIS | MECIS)) || (dma & (MSUIS | MSCIS));
+ corr = (mtl & MECIS) || (dma & MSCIS);
+ if (err) {
+ dwmac5_handle_mtl_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = dma & (DEUIS | DECIS);
+ corr = dma & DECIS;
+ if (err) {
+ dwmac5_handle_dma_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ return ret;
+}
+
+static const struct dwmac5_error {
+ const struct dwmac5_error_desc *desc;
+} dwmac5_all_errors[] = {
+ { dwmac5_mac_errors },
+ { dwmac5_mtl_errors },
+ { dwmac5_dma_errors },
+};
+
+int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count, const char **desc)
+{
+ int module = index / 32, offset = index % 32;
+ unsigned long *ptr = (unsigned long *)stats;
+
+ if (module >= ARRAY_SIZE(dwmac5_all_errors))
+ return -EINVAL;
+ if (!dwmac5_all_errors[module].desc[offset].valid)
+ return -EINVAL;
+ if (count)
+ *count = *(ptr + index);
+ if (desc)
+ *desc = dwmac5_all_errors[module].desc[offset].desc;
+ return 0;
+}
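+
+/* Worked example (illustrative): the dump index is linear across the
+ * three 32-entry tables (module = index / 32, offset = index % 32), so
+ * index 33 selects module 1 (MTL), offset 1, i.e. the "TXAMS" counter.
+ */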
+
+static int dwmac5_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + MTL_OPERATION_MODE);
+ val &= ~MTL_FRPE;
+ writel(val, ioaddr + MTL_OPERATION_MODE);
+
+ return readl_poll_timeout(ioaddr + MTL_RXP_CONTROL_STATUS, val,
+ val & RXPI, 1, 10000);
+}
+
+static void dwmac5_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + MTL_OPERATION_MODE);
+ val |= MTL_FRPE;
+ writel(val, ioaddr + MTL_OPERATION_MODE);
+}
+
+static int dwmac5_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
+ val, !(val & STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & ADDR;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Write OP */
+ val |= WRRDN;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Start Write */
+ val |= STARTBUSY;
+ writel(val, ioaddr + MTL_RXP_IACC_CTRL_STATUS);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + MTL_RXP_IACC_CTRL_STATUS,
+ val, !(val & STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static struct stmmac_tc_entry *
+dwmac5_rxp_get_next_entry(struct stmmac_tc_entry *entries, unsigned int count,
+ u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
+int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + GMAC_CONFIG);
+ val = old_val & ~GMAC_CONFIG_RE;
+ writel(val, ioaddr + GMAC_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwmac5_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries by reverse order */
+ while (1) {
+ entry = dwmac5_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwmac5_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update all pass entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwmac5_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & NPE;
+ val |= nve & NVE;
+ writel(val, ioaddr + MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwmac5_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + GMAC_CONFIG);
+ return ret;
+}
+
+int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + MAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~PPSx_MASK(index);
+
+ if (!enable) {
+ val |= PPSCMDx(index, 0x5);
+ val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= TRGTMODSELx(index, 0x2);
+ val |= PPSEN0;
+ writel(val, ioaddr + MAC_PPS_CONTROL);
+
+ writel(cfg->start.tv_sec, ioaddr + MAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + MAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + MAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + MAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ val |= PPSCMDx(index, 0x2);
+ writel(val, ioaddr + MAC_PPS_CONTROL);
+ return 0;
+}
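+
+/* Worked example (illustrative): for a hypothetical sub_second_inc of
+ * 4 ns and a requested 1 Hz PPS (period = 1 s):
+ * period = 1000000000 / 4 = 250000000 clock cycles
+ * MAC_PPSx_INTERVAL = period - 1 = 249999999
+ * MAC_PPSx_WIDTH = period / 2 - 1 = 124999999 (~50% duty cycle)
+ */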
+
+static int dwmac5_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + MTL_EST_GCL_DATA);
+
+ ctrl = (reg << ADDR_SHIFT);
+ ctrl |= gcl ? 0 : GCRR;
+
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ ctrl |= SRWO;
+ writel(ctrl, ioaddr + MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout(ioaddr + MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & SRWO), 100, 5000);
+}
+
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ int i, ret = 0x0;
+ u32 ctrl;
+
+ ret |= dwmac5_est_write(ioaddr, BTR_LOW, cfg->btr[0], false);
+ ret |= dwmac5_est_write(ioaddr, BTR_HIGH, cfg->btr[1], false);
+ ret |= dwmac5_est_write(ioaddr, TER, cfg->ter, false);
+ ret |= dwmac5_est_write(ioaddr, LLR, cfg->gcl_size, false);
+ ret |= dwmac5_est_write(ioaddr, CTR_LOW, cfg->ctr[0], false);
+ ret |= dwmac5_est_write(ioaddr, CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwmac5_est_write(ioaddr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(ioaddr + MTL_EST_CONTROL);
+ ctrl &= ~PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 6) << PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= EEST | SSWL;
+ else
+ ctrl &= ~EEST;
+
+ writel(ctrl, ioaddr + MTL_EST_CONTROL);
+
+ /* Configure EST interrupt */
+ if (cfg->enable)
+ ctrl = (IECGCE | IEHS | IEHF | IEBE | IECC);
+ else
+ ctrl = 0;
+
+ writel(ctrl, ioaddr + MTL_EST_INT_EN);
+
+ return 0;
+}
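+
+/* Worked example (illustrative): PTOV holds the PTP clock period scaled
+ * by 6, so for a hypothetical 250 MHz ptp_rate
+ * (1000000000 / 250000000) * 6 = 4 ns * 6 = 24
+ * is programmed into the PTOV field (shifted by PTOV_SHIFT).
+ */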
+
+void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt)
+{
+ u32 status, value, feqn, hbfq, hbfs, btrl;
+ u32 txqcnt_mask = (1 << txqcnt) - 1;
+
+ status = readl(ioaddr + MTL_EST_STATUS);
+
+ value = (CGCE | HLBS | HLBF | BTRE | SWLC);
+
+ /* Return if there is no error */
+ if (!(status & value))
+ return;
+
+ if (status & CGCE) {
+ /* Clear Interrupt */
+ writel(CGCE, ioaddr + MTL_EST_STATUS);
+
+ x->mtl_est_cgce++;
+ }
+
+ if (status & HLBS) {
+ value = readl(ioaddr + MTL_EST_SCH_ERR);
+ value &= txqcnt_mask;
+
+ x->mtl_est_hlbs++;
+
+ /* Clear Interrupt */
+ writel(value, ioaddr + MTL_EST_SCH_ERR);
+
+ /* Collect info to show all the queues that have the HLBS
+ * issue. The only way to clear this is to clear the
+ * statistic.
+ */
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(sched) Queue 0x%x\n", value);
+ }
+
+ if (status & HLBF) {
+ value = readl(ioaddr + MTL_EST_FRM_SZ_ERR);
+ feqn = value & txqcnt_mask;
+
+ value = readl(ioaddr + MTL_EST_FRM_SZ_CAP);
+ hbfq = (value & SZ_CAP_HBFQ_MASK(txqcnt)) >> SZ_CAP_HBFQ_SHIFT;
+ hbfs = value & SZ_CAP_HBFS_MASK;
+
+ x->mtl_est_hlbf++;
+
+ /* Clear Interrupt */
+ writel(feqn, ioaddr + MTL_EST_FRM_SZ_ERR);
+
+ if (net_ratelimit())
+ netdev_err(dev, "EST: HLB(size) Queue %u Size %u\n",
+ hbfq, hbfs);
+ }
+
+ if (status & BTRE) {
+ if ((status & BTRL) == BTRL_MAX)
+ x->mtl_est_btrlm++;
+ else
+ x->mtl_est_btre++;
+
+ btrl = (status & BTRL) >> BTRL_SHIFT;
+
+ if (net_ratelimit())
+ netdev_info(dev, "EST: BTR Error Loop Count %u\n",
+ btrl);
+
+ writel(BTRE, ioaddr + MTL_EST_STATUS);
+ }
+
+ if (status & SWLC) {
+ writel(SWLC, ioaddr + MTL_EST_STATUS);
+ netdev_info(dev, "EST: SWOL has been switched\n");
+ }
+}
+
+void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool enable)
+{
+ u32 value;
+
+ if (enable) {
+ cfg->fpe_csr = EFPE;
+ value = readl(ioaddr + GMAC_RXQ_CTRL1);
+ value &= ~GMAC_RXQCTRL_FPRQ;
+ value |= (num_rxq - 1) << GMAC_RXQCTRL_FPRQ_SHIFT;
+ writel(value, ioaddr + GMAC_RXQ_CTRL1);
+ } else {
+ cfg->fpe_csr = 0;
+ }
+ writel(cfg->fpe_csr, ioaddr + MAC_FPE_CTRL_STS);
+}
+
+int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev)
+{
+ u32 value;
+ int status;
+
+ status = FPE_EVENT_UNKNOWN;
+
+ /* Reads from the MAC_FPE_CTRL_STS register should only be performed
+ * here, since the status flags of MAC_FPE_CTRL_STS are "clear on read".
+ */
+ value = readl(ioaddr + MAC_FPE_CTRL_STS);
+
+ if (value & TRSP) {
+ status |= FPE_EVENT_TRSP;
+ netdev_info(dev, "FPE: Respond mPacket is transmitted\n");
+ }
+
+ if (value & TVER) {
+ status |= FPE_EVENT_TVER;
+ netdev_info(dev, "FPE: Verify mPacket is transmitted\n");
+ }
+
+ if (value & RRSP) {
+ status |= FPE_EVENT_RRSP;
+ netdev_info(dev, "FPE: Respond mPacket is received\n");
+ }
+
+ if (value & RVER) {
+ status |= FPE_EVENT_RVER;
+ netdev_info(dev, "FPE: Verify mPacket is received\n");
+ }
+
+ return status;
+}
+
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type)
+{
+ u32 value = cfg->fpe_csr;
+
+ if (type == MPACKET_VERIFY)
+ value |= SVER;
+ else if (type == MPACKET_RESPONSE)
+ value |= SRSP;
+
+ writel(value, ioaddr + MAC_FPE_CTRL_STS);
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac5.h b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
new file mode 100644
index 000000000..34e620790
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac5.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+// Copyright (c) 2017 Synopsys, Inc. and/or its affiliates.
+// stmmac Support for 5.xx Ethernet QoS cores
+
+#ifndef __DWMAC5_H__
+#define __DWMAC5_H__
+
+#define MAC_DPP_FSM_INT_STATUS 0x00000140
+#define MAC_AXI_SLV_DPE_ADDR_STATUS 0x00000144
+#define MAC_FSM_CONTROL 0x00000148
+#define PRTYEN BIT(1)
+#define TMOUTEN BIT(0)
+
+#define MAC_FPE_CTRL_STS 0x00000234
+#define TRSP BIT(19)
+#define TVER BIT(18)
+#define RRSP BIT(17)
+#define RVER BIT(16)
+#define SRSP BIT(2)
+#define SVER BIT(1)
+#define EFPE BIT(0)
+
+#define MAC_PPS_CONTROL 0x00000b70
+#define PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define PPS_MINIDX(x) ((x) * 8)
+#define PPSx_MASK(x) GENMASK(PPS_MAXIDX(x), PPS_MINIDX(x))
+#define MCGRENx(x) BIT(PPS_MAXIDX(x))
+#define TRGTMODSELx(x, val) \
+ GENMASK(PPS_MAXIDX(x) - 1, PPS_MAXIDX(x) - 2) & \
+ ((val) << (PPS_MAXIDX(x) - 2))
+#define PPSCMDx(x, val) \
+ GENMASK(PPS_MINIDX(x) + 3, PPS_MINIDX(x)) & \
+ ((val) << PPS_MINIDX(x))
+#define PPSEN0 BIT(4)
+#define MAC_PPSx_TARGET_TIME_SEC(x) (0x00000b80 + ((x) * 0x10))
+#define MAC_PPSx_TARGET_TIME_NSEC(x) (0x00000b84 + ((x) * 0x10))
+#define TRGTBUSY0 BIT(31)
+#define TTSL0 GENMASK(30, 0)
+#define MAC_PPSx_INTERVAL(x) (0x00000b88 + ((x) * 0x10))
+#define MAC_PPSx_WIDTH(x) (0x00000b8c + ((x) * 0x10))
+
+#define MTL_EST_CONTROL 0x00000c50
+#define PTOV GENMASK(31, 24)
+#define PTOV_SHIFT 24
+#define SSWL BIT(1)
+#define EEST BIT(0)
+
+#define MTL_EST_STATUS 0x00000c58
+#define BTRL GENMASK(11, 8)
+#define BTRL_SHIFT 8
+#define BTRL_MAX (0xF << BTRL_SHIFT)
+#define SWOL BIT(7)
+#define SWOL_SHIFT 7
+#define CGCE BIT(4)
+#define HLBS BIT(3)
+#define HLBF BIT(2)
+#define BTRE BIT(1)
+#define SWLC BIT(0)
+
+#define MTL_EST_SCH_ERR 0x00000c60
+#define MTL_EST_FRM_SZ_ERR 0x00000c64
+#define MTL_EST_FRM_SZ_CAP 0x00000c68
+#define SZ_CAP_HBFS_MASK GENMASK(14, 0)
+#define SZ_CAP_HBFQ_SHIFT 16
+#define SZ_CAP_HBFQ_MASK(_val) ({ typeof(_val) (val) = (_val); \
+ ((val) > 4 ? GENMASK(18, 16) : \
+ (val) > 2 ? GENMASK(17, 16) : \
+ BIT(16)); })
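+
+/* Worked example (illustrative): the HBFQ field width tracks the TX
+ * queue count, so SZ_CAP_HBFQ_MASK(8) = GENMASK(18, 16) (3 bits),
+ * SZ_CAP_HBFQ_MASK(4) = GENMASK(17, 16) (2 bits) and
+ * SZ_CAP_HBFQ_MASK(2) = BIT(16) (1 bit).
+ */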
+
+#define MTL_EST_INT_EN 0x00000c70
+#define IECGCE CGCE
+#define IEHS HLBS
+#define IEHF HLBF
+#define IEBE BTRE
+#define IECC SWLC
+
+#define MTL_EST_GCL_CONTROL 0x00000c80
+#define BTR_LOW 0x0
+#define BTR_HIGH 0x1
+#define CTR_LOW 0x2
+#define CTR_HIGH 0x3
+#define TER 0x4
+#define LLR 0x5
+#define ADDR_SHIFT 8
+#define GCRR BIT(2)
+#define SRWO BIT(0)
+#define MTL_EST_GCL_DATA 0x00000c84
+
+#define MTL_RXP_CONTROL_STATUS 0x00000ca0
+#define RXPI BIT(31)
+#define NPE GENMASK(23, 16)
+#define NVE GENMASK(7, 0)
+#define MTL_RXP_IACC_CTRL_STATUS 0x00000cb0
+#define STARTBUSY BIT(31)
+#define RXPEIEC GENMASK(22, 21)
+#define RXPEIEE BIT(20)
+#define WRRDN BIT(16)
+#define ADDR GENMASK(15, 0)
+#define MTL_RXP_IACC_DATA 0x00000cb4
+#define MTL_ECC_CONTROL 0x00000cc0
+#define MEEAO BIT(8)
+#define TSOEE BIT(4)
+#define MRXPEE BIT(3)
+#define MESTEE BIT(2)
+#define MRXEE BIT(1)
+#define MTXEE BIT(0)
+
+#define MTL_SAFETY_INT_STATUS 0x00000cc4
+#define MCSIS BIT(31)
+#define MEUIS BIT(1)
+#define MECIS BIT(0)
+#define MTL_ECC_INT_ENABLE 0x00000cc8
+#define RPCEIE BIT(12)
+#define ECEIE BIT(8)
+#define RXCEIE BIT(4)
+#define TXCEIE BIT(0)
+#define MTL_ECC_INT_STATUS 0x00000ccc
+#define MTL_DPP_CONTROL 0x00000ce0
+#define EPSI BIT(2)
+#define OPE BIT(1)
+#define EDPP BIT(0)
+
+#define DMA_SAFETY_INT_STATUS 0x00001080
+#define MSUIS BIT(29)
+#define MSCIS BIT(28)
+#define DEUIS BIT(1)
+#define DECIS BIT(0)
+#define DMA_ECC_INT_ENABLE 0x00001084
+#define TCEIE BIT(0)
+#define DMA_ECC_INT_STATUS 0x00001088
+
+/* EQoS version 5.xx VLAN Tag Filter Fail Packets Queuing */
+#define GMAC_RXQ_CTRL4 0x00000094
+#define GMAC_RXQCTRL_VFFQ_MASK GENMASK(19, 17)
+#define GMAC_RXQCTRL_VFFQ_SHIFT 17
+#define GMAC_RXQCTRL_VFFQE BIT(16)
+
+#define GMAC_INT_FPE_EN BIT(17)
+
+int dwmac5_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg);
+int dwmac5_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_stats *stats);
+int dwmac5_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count, const char **desc);
+int dwmac5_rxp_config(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count);
+int dwmac5_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags);
+int dwmac5_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+void dwmac5_est_irq_status(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+void dwmac5_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool enable);
+void dwmac5_fpe_send_mpacket(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type);
+int dwmac5_fpe_irq_status(void __iomem *ioaddr, struct net_device *dev);
+
+#endif /* __DWMAC5_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
new file mode 100644
index 000000000..acd70b9a3
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ DWMAC DMA Header file.
+
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __DWMAC_DMA_H__
+#define __DWMAC_DMA_H__
+
+/* DMA CRS Control and Status Register Mapping */
+#define DMA_BUS_MODE 0x00001000 /* Bus Mode */
+#define DMA_XMT_POLL_DEMAND 0x00001004 /* Transmit Poll Demand */
+#define DMA_RCV_POLL_DEMAND 0x00001008 /* Received Poll Demand */
+#define DMA_RCV_BASE_ADDR 0x0000100c /* Receive List Base */
+#define DMA_TX_BASE_ADDR 0x00001010 /* Transmit List Base */
+#define DMA_STATUS 0x00001014 /* Status Register */
+#define DMA_CONTROL 0x00001018 /* Ctrl (Operational Mode) */
+#define DMA_INTR_ENA 0x0000101c /* Interrupt Enable */
+#define DMA_MISSED_FRAME_CTR 0x00001020 /* Missed Frame Counter */
+
+/* SW Reset */
+#define DMA_BUS_MODE_SFT_RESET 0x00000001 /* Software Reset */
+
+/* Rx watchdog register */
+#define DMA_RX_WATCHDOG 0x00001024
+
+/* AXI Master Bus Mode */
+#define DMA_AXI_BUS_MODE 0x00001028
+
+#define DMA_AXI_EN_LPI BIT(31)
+#define DMA_AXI_LPI_XIT_FRM BIT(30)
+#define DMA_AXI_WR_OSR_LMT GENMASK(23, 20)
+#define DMA_AXI_WR_OSR_LMT_SHIFT 20
+#define DMA_AXI_WR_OSR_LMT_MASK 0xf
+#define DMA_AXI_RD_OSR_LMT GENMASK(19, 16)
+#define DMA_AXI_RD_OSR_LMT_SHIFT 16
+#define DMA_AXI_RD_OSR_LMT_MASK 0xf
+
+#define DMA_AXI_OSR_MAX 0xf
+#define DMA_AXI_MAX_OSR_LIMIT ((DMA_AXI_OSR_MAX << DMA_AXI_WR_OSR_LMT_SHIFT) | \
+ (DMA_AXI_OSR_MAX << DMA_AXI_RD_OSR_LMT_SHIFT))
+#define DMA_AXI_1KBBE BIT(13)
+#define DMA_AXI_AAL BIT(12)
+#define DMA_AXI_BLEN256 BIT(7)
+#define DMA_AXI_BLEN128 BIT(6)
+#define DMA_AXI_BLEN64 BIT(5)
+#define DMA_AXI_BLEN32 BIT(4)
+#define DMA_AXI_BLEN16 BIT(3)
+#define DMA_AXI_BLEN8 BIT(2)
+#define DMA_AXI_BLEN4 BIT(1)
+#define DMA_BURST_LEN_DEFAULT (DMA_AXI_BLEN256 | DMA_AXI_BLEN128 | \
+ DMA_AXI_BLEN64 | DMA_AXI_BLEN32 | \
+ DMA_AXI_BLEN16 | DMA_AXI_BLEN8 | \
+ DMA_AXI_BLEN4)
+
+#define DMA_AXI_UNDEF BIT(0)
+
+#define DMA_AXI_BURST_LEN_MASK 0x000000FE
+
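+/* Illustrative sketch (not part of the original patch): composing a
+ * DMA_AXI_BUS_MODE value from the fields above, enabling every burst
+ * length up to 256 beats and the maximum outstanding request limits.
+ * The helper name is hypothetical.
+ */
+#if 0
+static inline void dwmac_axi_set_max_perf(void __iomem *ioaddr)
+{
+ u32 axi = readl(ioaddr + DMA_AXI_BUS_MODE);
+
+ axi &= ~(DMA_AXI_WR_OSR_LMT | DMA_AXI_RD_OSR_LMT);
+ axi |= DMA_AXI_MAX_OSR_LIMIT | DMA_BURST_LEN_DEFAULT;
+ writel(axi, ioaddr + DMA_AXI_BUS_MODE);
+}
+#endif
+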
+#define DMA_CUR_TX_BUF_ADDR 0x00001050 /* Current Host Tx Buffer */
+#define DMA_CUR_RX_BUF_ADDR 0x00001054 /* Current Host Rx Buffer */
+#define DMA_HW_FEATURE 0x00001058 /* HW Feature Register */
+
+/* DMA Control register defines */
+#define DMA_CONTROL_ST 0x00002000 /* Start/Stop Transmission */
+#define DMA_CONTROL_SR 0x00000002 /* Start/Stop Receive */
+
+/* DMA Normal interrupt */
+#define DMA_INTR_ENA_NIE 0x00010000 /* Normal Summary */
+#define DMA_INTR_ENA_TIE 0x00000001 /* Transmit Interrupt */
+#define DMA_INTR_ENA_TUE 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_INTR_ENA_RIE 0x00000040 /* Receive Interrupt */
+#define DMA_INTR_ENA_ERE 0x00004000 /* Early Receive */
+
+#define DMA_INTR_NORMAL (DMA_INTR_ENA_NIE | DMA_INTR_ENA_RIE | \
+ DMA_INTR_ENA_TIE)
+
+/* DMA Abnormal interrupt */
+#define DMA_INTR_ENA_AIE 0x00008000 /* Abnormal Summary */
+#define DMA_INTR_ENA_FBE 0x00002000 /* Fatal Bus Error */
+#define DMA_INTR_ENA_ETE 0x00000400 /* Early Transmit */
+#define DMA_INTR_ENA_RWE 0x00000200 /* Receive Watchdog */
+#define DMA_INTR_ENA_RSE 0x00000100 /* Receive Stopped */
+#define DMA_INTR_ENA_RUE 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_INTR_ENA_UNE 0x00000020 /* Tx Underflow */
+#define DMA_INTR_ENA_OVE 0x00000010 /* Receive Overflow */
+#define DMA_INTR_ENA_TJE 0x00000008 /* Transmit Jabber */
+#define DMA_INTR_ENA_TSE 0x00000002 /* Transmit Stopped */
+
+#define DMA_INTR_ABNORMAL (DMA_INTR_ENA_AIE | DMA_INTR_ENA_FBE | \
+ DMA_INTR_ENA_UNE)
+
+/* DMA default interrupt mask */
+#define DMA_INTR_DEFAULT_MASK (DMA_INTR_NORMAL | DMA_INTR_ABNORMAL)
+#define DMA_INTR_DEFAULT_RX (DMA_INTR_ENA_RIE)
+#define DMA_INTR_DEFAULT_TX (DMA_INTR_ENA_TIE)
+
+/* DMA Status register defines */
+#define DMA_STATUS_GLPII 0x40000000 /* GMAC LPI interrupt */
+#define DMA_STATUS_GPI 0x10000000 /* PMT interrupt */
+#define DMA_STATUS_GMI 0x08000000 /* MMC interrupt */
+#define DMA_STATUS_GLI 0x04000000 /* GMAC Line interface int */
+#define DMA_STATUS_EB_MASK 0x00380000 /* Error Bits Mask */
+#define DMA_STATUS_EB_TX_ABORT 0x00080000 /* Error Bits - TX Abort */
+#define DMA_STATUS_EB_RX_ABORT 0x00100000 /* Error Bits - RX Abort */
+#define DMA_STATUS_TS_MASK 0x00700000 /* Transmit Process State */
+#define DMA_STATUS_TS_SHIFT 20
+#define DMA_STATUS_RS_MASK 0x000e0000 /* Receive Process State */
+#define DMA_STATUS_RS_SHIFT 17
+#define DMA_STATUS_NIS 0x00010000 /* Normal Interrupt Summary */
+#define DMA_STATUS_AIS 0x00008000 /* Abnormal Interrupt Summary */
+#define DMA_STATUS_ERI 0x00004000 /* Early Receive Interrupt */
+#define DMA_STATUS_FBI 0x00002000 /* Fatal Bus Error Interrupt */
+#define DMA_STATUS_ETI 0x00000400 /* Early Transmit Interrupt */
+#define DMA_STATUS_RWT 0x00000200 /* Receive Watchdog Timeout */
+#define DMA_STATUS_RPS 0x00000100 /* Receive Process Stopped */
+#define DMA_STATUS_RU 0x00000080 /* Receive Buffer Unavailable */
+#define DMA_STATUS_RI 0x00000040 /* Receive Interrupt */
+#define DMA_STATUS_UNF 0x00000020 /* Transmit Underflow */
+#define DMA_STATUS_OVF 0x00000010 /* Receive Overflow */
+#define DMA_STATUS_TJT 0x00000008 /* Transmit Jabber Timeout */
+#define DMA_STATUS_TU 0x00000004 /* Transmit Buffer Unavailable */
+#define DMA_STATUS_TPS 0x00000002 /* Transmit Process Stopped */
+#define DMA_STATUS_TI 0x00000001 /* Transmit Interrupt */
+#define DMA_CONTROL_FTF 0x00100000 /* Flush transmit FIFO */
+
+#define DMA_STATUS_MSK_COMMON (DMA_STATUS_NIS | \
+ DMA_STATUS_AIS | \
+ DMA_STATUS_FBI)
+
+#define DMA_STATUS_MSK_RX (DMA_STATUS_ERI | \
+ DMA_STATUS_RWT | \
+ DMA_STATUS_RPS | \
+ DMA_STATUS_RU | \
+ DMA_STATUS_RI | \
+ DMA_STATUS_OVF | \
+ DMA_STATUS_MSK_COMMON)
+
+#define DMA_STATUS_MSK_TX (DMA_STATUS_ETI | \
+ DMA_STATUS_UNF | \
+ DMA_STATUS_TJT | \
+ DMA_STATUS_TU | \
+ DMA_STATUS_TPS | \
+ DMA_STATUS_TI | \
+ DMA_STATUS_MSK_COMMON)
+
+#define NUM_DWMAC100_DMA_REGS 9
+#define NUM_DWMAC1000_DMA_REGS 23
+#define NUM_DWMAC4_DMA_REGS 27
+
+void dwmac_enable_dma_transmission(void __iomem *ioaddr);
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx);
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan);
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan);
+int dwmac_dma_interrupt(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 chan, u32 dir);
+int dwmac_dma_reset(void __iomem *ioaddr);
+
+#endif /* __DWMAC_DMA_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
new file mode 100644
index 000000000..9b6138b11
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_lib.c
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include "common.h"
+#include "dwmac_dma.h"
+
+#define GMAC_HI_REG_AE 0x80000000
+
+int dwmac_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + DMA_BUS_MODE);
+
+ /* DMA SW reset */
+ value |= DMA_BUS_MODE_SFT_RESET;
+ writel(value, ioaddr + DMA_BUS_MODE);
+
+ return readl_poll_timeout(ioaddr + DMA_BUS_MODE, value,
+ !(value & DMA_BUS_MODE_SFT_RESET),
+ 10000, 200000);
+}
+
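+/* Usage sketch (illustrative, not part of the original patch): the SW
+ * reset must complete before the descriptor list bases are programmed;
+ * error handling is elided and rx_phy/tx_phy are hypothetical DMA
+ * addresses of the descriptor rings.
+ *
+ * if (dwmac_dma_reset(ioaddr))
+ * return -EBUSY;
+ * writel(rx_phy, ioaddr + DMA_RCV_BASE_ADDR);
+ * writel(tx_phy, ioaddr + DMA_TX_BASE_ADDR);
+ */
+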
+/* CSR1 enables the transmit DMA to check for a new descriptor */
+void dwmac_enable_dma_transmission(void __iomem *ioaddr)
+{
+ writel(1, ioaddr + DMA_XMT_POLL_DEMAND);
+}
+
+void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value |= DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value |= DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
+}
+
+void dwmac_disable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+
+ if (rx)
+ value &= ~DMA_INTR_DEFAULT_RX;
+ if (tx)
+ value &= ~DMA_INTR_DEFAULT_TX;
+
+ writel(value, ioaddr + DMA_INTR_ENA);
+}
+
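+/* Usage sketch (illustrative): the classic NAPI handshake built on the
+ * two helpers above -- mask RX interrupts in the ISR, poll, then unmask
+ * once the budget is consumed. 'ch' is a hypothetical per-channel
+ * context holding the napi_struct.
+ *
+ * dwmac_disable_dma_irq(ioaddr, chan, true, false);
+ * napi_schedule(&ch->rx_napi);
+ * ...poll runs...
+ * dwmac_enable_dma_irq(ioaddr, chan, true, false);
+ */
+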
+void dwmac_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+
+ value |= DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+
+ value &= ~DMA_CONTROL_ST;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+
+ value |= DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+void dwmac_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CONTROL);
+
+ value &= ~DMA_CONTROL_SR;
+ writel(value, ioaddr + DMA_CONTROL);
+}
+
+#ifdef DWMAC_DMA_DEBUG
+static void show_tx_process_state(unsigned int status)
+{
+ unsigned int state;
+
+ state = (status & DMA_STATUS_TS_MASK) >> DMA_STATUS_TS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_debug("- TX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_debug("- TX (Running): Fetching the Tx desc\n");
+ break;
+ case 2:
+ pr_debug("- TX (Running): Waiting for end of tx\n");
+ break;
+ case 3:
+ pr_debug("- TX (Running): Reading the data "
+ "and queuing the data into the Tx buf\n");
+ break;
+ case 6:
+ pr_debug("- TX (Suspended): Tx Buff Underflow "
+ "or an unavailable Transmit descriptor\n");
+ break;
+ case 7:
+ pr_debug("- TX (Running): Closing Tx descriptor\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void show_rx_process_state(unsigned int status)
+{
+ unsigned int state;
+
+ state = (status & DMA_STATUS_RS_MASK) >> DMA_STATUS_RS_SHIFT;
+
+ switch (state) {
+ case 0:
+ pr_debug("- RX (Stopped): Reset or Stop command\n");
+ break;
+ case 1:
+ pr_debug("- RX (Running): Fetching the Rx desc\n");
+ break;
+ case 2:
+ pr_debug("- RX (Running): Checking for end of pkt\n");
+ break;
+ case 3:
+ pr_debug("- RX (Running): Waiting for Rx pkt\n");
+ break;
+ case 4:
+ pr_debug("- RX (Suspended): Unavailable Rx buf\n");
+ break;
+ case 5:
+ pr_debug("- RX (Running): Closing Rx descriptor\n");
+ break;
+ case 6:
+ pr_debug("- RX(Running): Flushing the current frame"
+ " from the Rx buf\n");
+ break;
+ case 7:
+ pr_debug("- RX (Running): Queuing the Rx frame"
+ " from the Rx buf into memory\n");
+ break;
+ default:
+ break;
+ }
+}
+#endif
+
+int dwmac_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir)
+{
+ int ret = 0;
+ /* read the status register (CSR5) */
+ u32 intr_status = readl(ioaddr + DMA_STATUS);
+
+#ifdef DWMAC_DMA_DEBUG
+ /* Enable it to monitor DMA rx/tx status in case of critical problems */
+ pr_debug("%s: [CSR5: 0x%08x]\n", __func__, intr_status);
+ show_tx_process_state(intr_status);
+ show_rx_process_state(intr_status);
+#endif
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= DMA_STATUS_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= DMA_STATUS_MSK_TX;
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & DMA_STATUS_AIS)) {
+ if (unlikely(intr_status & DMA_STATUS_UNF)) {
+ ret = tx_hard_error_bump_tc;
+ x->tx_undeflow_irq++;
+ }
+ if (unlikely(intr_status & DMA_STATUS_TJT))
+ x->tx_jabber_irq++;
+
+ if (unlikely(intr_status & DMA_STATUS_OVF))
+ x->rx_overflow_irq++;
+
+ if (unlikely(intr_status & DMA_STATUS_RU))
+ x->rx_buf_unav_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RPS))
+ x->rx_process_stopped_irq++;
+ if (unlikely(intr_status & DMA_STATUS_RWT))
+ x->rx_watchdog_irq++;
+ if (unlikely(intr_status & DMA_STATUS_ETI))
+ x->tx_early_irq++;
+ if (unlikely(intr_status & DMA_STATUS_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret = tx_hard_error;
+ }
+ if (unlikely(intr_status & DMA_STATUS_FBI)) {
+ x->fatal_bus_error_irq++;
+ ret = tx_hard_error;
+ }
+ }
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & DMA_STATUS_NIS)) {
+ x->normal_irq_n++;
+ if (likely(intr_status & DMA_STATUS_RI)) {
+ u32 value = readl(ioaddr + DMA_INTR_ENA);
+ /* to schedule NAPI on real RIE event. */
+ if (likely(value & DMA_INTR_ENA_RIE)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ }
+ if (likely(intr_status & DMA_STATUS_TI)) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ if (unlikely(intr_status & DMA_STATUS_ERI))
+ x->rx_early_irq++;
+ }
+ /* Optional hardware blocks, interrupts should be disabled */
+ if (unlikely(intr_status &
+ (DMA_STATUS_GPI | DMA_STATUS_GMI | DMA_STATUS_GLI)))
+ pr_warn("%s: unexpected status %08x\n", __func__, intr_status);
+
+ /* Clear the interrupt by writing a logic 1 to the CSR5[15-0] */
+ writel((intr_status & 0x1ffff), ioaddr + DMA_STATUS);
+
+ return ret;
+}
+
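+/* Usage sketch (illustrative): callers dispatch on the bits returned by
+ * dwmac_dma_interrupt() above; handle_rx/handle_tx are the enum values
+ * from common.h, and 'ch' is a hypothetical per-channel context.
+ *
+ * status = dwmac_dma_interrupt(ioaddr, &priv->xstats, chan, dir);
+ * if (status & handle_rx)
+ * napi_schedule(&ch->rx_napi);
+ * if (status & handle_tx)
+ * napi_schedule(&ch->tx_napi);
+ */
+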
+void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr)
+{
+ u32 csr6 = readl(ioaddr + DMA_CONTROL);
+
+ writel((csr6 | DMA_CONTROL_FTF), ioaddr + DMA_CONTROL);
+
+ do {} while ((readl(ioaddr + DMA_CONTROL) & DMA_CONTROL_FTF));
+}
+
+void stmmac_set_mac_addr(void __iomem *ioaddr, const u8 addr[6],
+ unsigned int high, unsigned int low)
+{
+ unsigned long data;
+
+ data = (addr[5] << 8) | addr[4];
+ /* For the MAC address registers we have to set the Address Enable
+ * (AE) bit; it has no effect on address High Reg 0, where bit 31
+ * (MO) is read-only.
+ */
+ writel(data | GMAC_HI_REG_AE, ioaddr + high);
+ data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(data, ioaddr + low);
+}
+EXPORT_SYMBOL_GPL(stmmac_set_mac_addr);
+
+/* Enable/disable MAC RX/TX */
+void stmmac_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 old_val, value;
+
+ old_val = readl(ioaddr + MAC_CTRL_REG);
+ value = old_val;
+
+ if (enable)
+ value |= MAC_ENABLE_RX | MAC_ENABLE_TX;
+ else
+ value &= ~(MAC_ENABLE_TX | MAC_ENABLE_RX);
+
+ if (value != old_val)
+ writel(value, ioaddr + MAC_CTRL_REG);
+}
+
+void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
+ unsigned int high, unsigned int low)
+{
+ unsigned int hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + high);
+ lo_addr = readl(ioaddr + low);
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+EXPORT_SYMBOL_GPL(stmmac_get_mac_addr);
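+
+/* Round-trip sketch (illustrative, not part of the original patch):
+ * writing an address with stmmac_set_mac_addr() and reading it back via
+ * stmmac_get_mac_addr() yields the same six bytes, since both use the
+ * same split (bytes 0-3 in the low word, bytes 4-5 in the high word).
+ * The offsets come from the MAC variant, e.g. GMAC_ADDR_HIGH(0) and
+ * GMAC_ADDR_LOW(0) in dwmac1000.h.
+ *
+ * stmmac_set_mac_addr(ioaddr, dev->dev_addr, GMAC_ADDR_HIGH(0),
+ * GMAC_ADDR_LOW(0));
+ * stmmac_get_mac_addr(ioaddr, buf, GMAC_ADDR_HIGH(0), GMAC_ADDR_LOW(0));
+ */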
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
new file mode 100644
index 000000000..880a75bf2
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2.h
@@ -0,0 +1,474 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXGMAC2_H__
+#define __STMMAC_DWXGMAC2_H__
+
+#include "common.h"
+
+/* Misc */
+#define XGMAC_JUMBO_LEN 16368
+
+/* MAC Registers */
+#define XGMAC_TX_CONFIG 0x00000000
+#define XGMAC_CONFIG_SS_OFF 29
+#define XGMAC_CONFIG_SS_MASK GENMASK(31, 29)
+#define XGMAC_CONFIG_SS_10000 (0x0 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500_GMII (0x2 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_1000_GMII (0x3 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_100_MII (0x4 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_5000 (0x5 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_2500 (0x6 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SS_10_MII (0x7 << XGMAC_CONFIG_SS_OFF)
+#define XGMAC_CONFIG_SARC GENMASK(22, 20)
+#define XGMAC_CONFIG_SARC_SHIFT 20
+#define XGMAC_CONFIG_JD BIT(16)
+#define XGMAC_CONFIG_TE BIT(0)
+#define XGMAC_CORE_INIT_TX (XGMAC_CONFIG_JD)
+#define XGMAC_RX_CONFIG 0x00000004
+#define XGMAC_CONFIG_ARPEN BIT(31)
+#define XGMAC_CONFIG_GPSL GENMASK(29, 16)
+#define XGMAC_CONFIG_GPSL_SHIFT 16
+#define XGMAC_CONFIG_HDSMS GENMASK(14, 12)
+#define XGMAC_CONFIG_HDSMS_SHIFT 12
+#define XGMAC_CONFIG_HDSMS_256 (0x2 << XGMAC_CONFIG_HDSMS_SHIFT)
+#define XGMAC_CONFIG_S2KP BIT(11)
+#define XGMAC_CONFIG_LM BIT(10)
+#define XGMAC_CONFIG_IPC BIT(9)
+#define XGMAC_CONFIG_JE BIT(8)
+#define XGMAC_CONFIG_WD BIT(7)
+#define XGMAC_CONFIG_GPSLCE BIT(6)
+#define XGMAC_CONFIG_CST BIT(2)
+#define XGMAC_CONFIG_ACS BIT(1)
+#define XGMAC_CONFIG_RE BIT(0)
+#define XGMAC_CORE_INIT_RX (XGMAC_CONFIG_GPSLCE | XGMAC_CONFIG_WD | \
+ (XGMAC_JUMBO_LEN << XGMAC_CONFIG_GPSL_SHIFT))
+#define XGMAC_PACKET_FILTER 0x00000008
+#define XGMAC_FILTER_RA BIT(31)
+#define XGMAC_FILTER_IPFE BIT(20)
+#define XGMAC_FILTER_VTFE BIT(16)
+#define XGMAC_FILTER_HPF BIT(10)
+#define XGMAC_FILTER_PCF BIT(7)
+#define XGMAC_FILTER_PM BIT(4)
+#define XGMAC_FILTER_HMC BIT(2)
+#define XGMAC_FILTER_PR BIT(0)
+#define XGMAC_HASH_TABLE(x) (0x00000010 + (x) * 4)
+#define XGMAC_MAX_HASH_TABLE 8
+#define XGMAC_VLAN_TAG 0x00000050
+#define XGMAC_VLAN_EDVLP BIT(26)
+#define XGMAC_VLAN_VTHM BIT(25)
+#define XGMAC_VLAN_DOVLTC BIT(20)
+#define XGMAC_VLAN_ESVL BIT(18)
+#define XGMAC_VLAN_ETV BIT(16)
+#define XGMAC_VLAN_VID GENMASK(15, 0)
+#define XGMAC_VLAN_HASH_TABLE 0x00000058
+#define XGMAC_VLAN_INCL 0x00000060
+#define XGMAC_VLAN_VLTI BIT(20)
+#define XGMAC_VLAN_CSVL BIT(19)
+#define XGMAC_VLAN_VLC GENMASK(17, 16)
+#define XGMAC_VLAN_VLC_SHIFT 16
+#define XGMAC_RXQ_CTRL0 0x000000a0
+#define XGMAC_RXQEN(x) GENMASK((x) * 2 + 1, (x) * 2)
+#define XGMAC_RXQEN_SHIFT(x) ((x) * 2)
+#define XGMAC_RXQ_CTRL1 0x000000a4
+#define XGMAC_RQ GENMASK(7, 4)
+#define XGMAC_RQ_SHIFT 4
+#define XGMAC_RXQ_CTRL2 0x000000a8
+#define XGMAC_RXQ_CTRL3 0x000000ac
+#define XGMAC_PSRQ(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSRQ_SHIFT(x) ((x) * 8)
+#define XGMAC_INT_STATUS 0x000000b0
+#define XGMAC_LPIIS BIT(5)
+#define XGMAC_PMTIS BIT(4)
+#define XGMAC_INT_EN 0x000000b4
+#define XGMAC_TSIE BIT(12)
+#define XGMAC_LPIIE BIT(5)
+#define XGMAC_PMTIE BIT(4)
+#define XGMAC_INT_DEFAULT_EN (XGMAC_LPIIE | XGMAC_PMTIE)
+#define XGMAC_Qx_TX_FLOW_CTRL(x) (0x00000070 + (x) * 4)
+#define XGMAC_PT GENMASK(31, 16)
+#define XGMAC_PT_SHIFT 16
+#define XGMAC_TFE BIT(1)
+#define XGMAC_RX_FLOW_CTRL 0x00000090
+#define XGMAC_RFE BIT(0)
+#define XGMAC_PMT 0x000000c0
+#define XGMAC_GLBLUCAST BIT(9)
+#define XGMAC_RWKPKTEN BIT(2)
+#define XGMAC_MGKPKTEN BIT(1)
+#define XGMAC_PWRDWN BIT(0)
+#define XGMAC_LPI_CTRL 0x000000d0
+#define XGMAC_TXCGE BIT(21)
+#define XGMAC_LPITXA BIT(19)
+#define XGMAC_PLS BIT(17)
+#define XGMAC_LPITXEN BIT(16)
+#define XGMAC_RLPIEX BIT(3)
+#define XGMAC_RLPIEN BIT(2)
+#define XGMAC_TLPIEX BIT(1)
+#define XGMAC_TLPIEN BIT(0)
+#define XGMAC_LPI_TIMER_CTRL 0x000000d4
+#define XGMAC_HW_FEATURE0 0x0000011c
+#define XGMAC_HWFEAT_SAVLANINS BIT(27)
+#define XGMAC_HWFEAT_RXCOESEL BIT(16)
+#define XGMAC_HWFEAT_TXCOESEL BIT(14)
+#define XGMAC_HWFEAT_EEESEL BIT(13)
+#define XGMAC_HWFEAT_TSSEL BIT(12)
+#define XGMAC_HWFEAT_AVSEL BIT(11)
+#define XGMAC_HWFEAT_RAVSEL BIT(10)
+#define XGMAC_HWFEAT_ARPOFFSEL BIT(9)
+#define XGMAC_HWFEAT_MMCSEL BIT(8)
+#define XGMAC_HWFEAT_MGKSEL BIT(7)
+#define XGMAC_HWFEAT_RWKSEL BIT(6)
+#define XGMAC_HWFEAT_VLHASH BIT(4)
+#define XGMAC_HWFEAT_GMIISEL BIT(1)
+#define XGMAC_HW_FEATURE1 0x00000120
+#define XGMAC_HWFEAT_L3L4FNUM GENMASK(30, 27)
+#define XGMAC_HWFEAT_HASHTBLSZ GENMASK(25, 24)
+#define XGMAC_HWFEAT_RSSEN BIT(20)
+#define XGMAC_HWFEAT_TSOEN BIT(18)
+#define XGMAC_HWFEAT_SPHEN BIT(17)
+#define XGMAC_HWFEAT_ADDR64 GENMASK(15, 14)
+#define XGMAC_HWFEAT_TXFIFOSIZE GENMASK(10, 6)
+#define XGMAC_HWFEAT_RXFIFOSIZE GENMASK(4, 0)
+#define XGMAC_HW_FEATURE2 0x00000124
+#define XGMAC_HWFEAT_PPSOUTNUM GENMASK(26, 24)
+#define XGMAC_HWFEAT_TXCHCNT GENMASK(21, 18)
+#define XGMAC_HWFEAT_RXCHCNT GENMASK(15, 12)
+#define XGMAC_HWFEAT_TXQCNT GENMASK(9, 6)
+#define XGMAC_HWFEAT_RXQCNT GENMASK(3, 0)
+#define XGMAC_HW_FEATURE3 0x00000128
+#define XGMAC_HWFEAT_TBSSEL BIT(27)
+#define XGMAC_HWFEAT_FPESEL BIT(26)
+#define XGMAC_HWFEAT_ESTWID GENMASK(24, 23)
+#define XGMAC_HWFEAT_ESTDEP GENMASK(22, 20)
+#define XGMAC_HWFEAT_ESTSEL BIT(19)
+#define XGMAC_HWFEAT_ASP GENMASK(15, 14)
+#define XGMAC_HWFEAT_DVLAN BIT(13)
+#define XGMAC_HWFEAT_FRPES GENMASK(12, 11)
+#define XGMAC_HWFEAT_FRPPB GENMASK(10, 9)
+#define XGMAC_HWFEAT_FRPSEL BIT(3)
+#define XGMAC_MAC_DPP_FSM_INT_STATUS 0x00000150
+#define XGMAC_MAC_FSM_CONTROL 0x00000158
+#define XGMAC_PRTYEN BIT(1)
+#define XGMAC_TMOUTEN BIT(0)
+#define XGMAC_MDIO_ADDR 0x00000200
+#define XGMAC_MDIO_DATA 0x00000204
+#define XGMAC_MDIO_C22P 0x00000220
+#define XGMAC_FPE_CTRL_STS 0x00000280
+#define XGMAC_EFPE BIT(0)
+#define XGMAC_ADDRx_HIGH(x) (0x00000300 + (x) * 0x8)
+#define XGMAC_ADDR_MAX 32
+#define XGMAC_AE BIT(31)
+#define XGMAC_DCS GENMASK(19, 16)
+#define XGMAC_DCS_SHIFT 16
+#define XGMAC_ADDRx_LOW(x) (0x00000304 + (x) * 0x8)
+#define XGMAC_L3L4_ADDR_CTRL 0x00000c00
+#define XGMAC_IDDR GENMASK(15, 8)
+#define XGMAC_IDDR_SHIFT 8
+#define XGMAC_IDDR_FNUM 4
+#define XGMAC_TT BIT(1)
+#define XGMAC_XB BIT(0)
+#define XGMAC_L3L4_DATA 0x00000c04
+#define XGMAC_L3L4_CTRL 0x0
+#define XGMAC_L4DPIM0 BIT(21)
+#define XGMAC_L4DPM0 BIT(20)
+#define XGMAC_L4SPIM0 BIT(19)
+#define XGMAC_L4SPM0 BIT(18)
+#define XGMAC_L4PEN0 BIT(16)
+#define XGMAC_L3HDBM0 GENMASK(15, 11)
+#define XGMAC_L3HSBM0 GENMASK(10, 6)
+#define XGMAC_L3DAIM0 BIT(5)
+#define XGMAC_L3DAM0 BIT(4)
+#define XGMAC_L3SAIM0 BIT(3)
+#define XGMAC_L3SAM0 BIT(2)
+#define XGMAC_L3PEN0 BIT(0)
+#define XGMAC_L4_ADDR 0x1
+#define XGMAC_L4DP0 GENMASK(31, 16)
+#define XGMAC_L4DP0_SHIFT 16
+#define XGMAC_L4SP0 GENMASK(15, 0)
+#define XGMAC_L3_ADDR0 0x4
+#define XGMAC_L3_ADDR1 0x5
+#define XGMAC_L3_ADDR2 0x6
+#define XGMAC_L3_ADDR3 0x7
+#define XGMAC_ARP_ADDR 0x00000c10
+#define XGMAC_RSS_CTRL 0x00000c80
+#define XGMAC_UDP4TE BIT(3)
+#define XGMAC_TCP4TE BIT(2)
+#define XGMAC_IP2TE BIT(1)
+#define XGMAC_RSSE BIT(0)
+#define XGMAC_RSS_ADDR 0x00000c88
+#define XGMAC_RSSIA_SHIFT 8
+#define XGMAC_ADDRT BIT(2)
+#define XGMAC_CT BIT(1)
+#define XGMAC_OB BIT(0)
+#define XGMAC_RSS_DATA 0x00000c8c
+#define XGMAC_TIMESTAMP_STATUS 0x00000d20
+#define XGMAC_TXTSC BIT(15)
+#define XGMAC_TXTIMESTAMP_NSEC 0x00000d30
+#define XGMAC_TXTSSTSLO GENMASK(30, 0)
+#define XGMAC_TXTIMESTAMP_SEC 0x00000d34
+#define XGMAC_PPS_CONTROL 0x00000d70
+#define XGMAC_PPS_MAXIDX(x) ((((x) + 1) * 8) - 1)
+#define XGMAC_PPS_MINIDX(x) ((x) * 8)
+#define XGMAC_PPSx_MASK(x) \
+ GENMASK(XGMAC_PPS_MAXIDX(x), XGMAC_PPS_MINIDX(x))
+#define XGMAC_TRGTMODSELx(x, val) \
+ GENMASK(XGMAC_PPS_MAXIDX(x) - 1, XGMAC_PPS_MAXIDX(x) - 2) & \
+ ((val) << (XGMAC_PPS_MAXIDX(x) - 2))
+#define XGMAC_PPSCMDx(x, val) \
+ GENMASK(XGMAC_PPS_MINIDX(x) + 3, XGMAC_PPS_MINIDX(x)) & \
+ ((val) << XGMAC_PPS_MINIDX(x))
+#define XGMAC_PPSCMD_START 0x2
+#define XGMAC_PPSCMD_STOP 0x5
+#define XGMAC_PPSENx(x) BIT(4 + (x) * 8)
+#define XGMAC_PPSx_TARGET_TIME_SEC(x) (0x00000d80 + (x) * 0x10)
+#define XGMAC_PPSx_TARGET_TIME_NSEC(x) (0x00000d84 + (x) * 0x10)
+#define XGMAC_TRGTBUSY0 BIT(31)
+#define XGMAC_PPSx_INTERVAL(x) (0x00000d88 + (x) * 0x10)
+#define XGMAC_PPSx_WIDTH(x) (0x00000d8c + (x) * 0x10)
+
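+/* Illustrative sketch (not part of the original patch): starting PPS
+ * output on instance 'x' with the command/mode fields above. The mode
+ * value 0x2 is an assumption borrowed from the dwmac5 flexible-PPS
+ * path; the helper name is hypothetical.
+ */
+#if 0
+static inline void xgmac_pps_cmd_start(void __iomem *ioaddr, int x)
+{
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+
+ val &= ~XGMAC_PPSx_MASK(x);
+ val |= XGMAC_PPSCMDx(x, XGMAC_PPSCMD_START) |
+ XGMAC_TRGTMODSELx(x, 0x2) | XGMAC_PPSENx(x);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+}
+#endif
+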
+/* MTL Registers */
+#define XGMAC_MTL_OPMODE 0x00001000
+#define XGMAC_FRPE BIT(15)
+#define XGMAC_ETSALG GENMASK(6, 5)
+#define XGMAC_WRR (0x0 << 5)
+#define XGMAC_WFQ (0x1 << 5)
+#define XGMAC_DWRR (0x2 << 5)
+#define XGMAC_RAA BIT(2)
+#define XGMAC_MTL_INT_STATUS 0x00001020
+#define XGMAC_MTL_RXQ_DMA_MAP0 0x00001030
+#define XGMAC_MTL_RXQ_DMA_MAP1 0x00001034
+#define XGMAC_QxMDMACH(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_QxMDMACH_SHIFT(x) ((x) * 8)
+#define XGMAC_QDDMACH BIT(7)
+#define XGMAC_TC_PRTY_MAP0 0x00001040
+#define XGMAC_TC_PRTY_MAP1 0x00001044
+#define XGMAC_PSTC(x) GENMASK((x) * 8 + 7, (x) * 8)
+#define XGMAC_PSTC_SHIFT(x) ((x) * 8)
+#define XGMAC_MTL_EST_CONTROL 0x00001050
+#define XGMAC_PTOV GENMASK(31, 23)
+#define XGMAC_PTOV_SHIFT 23
+#define XGMAC_SSWL BIT(1)
+#define XGMAC_EEST BIT(0)
+#define XGMAC_MTL_EST_GCL_CONTROL 0x00001080
+#define XGMAC_BTR_LOW 0x0
+#define XGMAC_BTR_HIGH 0x1
+#define XGMAC_CTR_LOW 0x2
+#define XGMAC_CTR_HIGH 0x3
+#define XGMAC_TER 0x4
+#define XGMAC_LLR 0x5
+#define XGMAC_ADDR_SHIFT 8
+#define XGMAC_GCRR BIT(2)
+#define XGMAC_SRWO BIT(0)
+#define XGMAC_MTL_EST_GCL_DATA 0x00001084
+#define XGMAC_MTL_RXP_CONTROL_STATUS 0x000010a0
+#define XGMAC_RXPI BIT(31)
+#define XGMAC_NPE GENMASK(23, 16)
+#define XGMAC_NVE GENMASK(7, 0)
+#define XGMAC_MTL_RXP_IACC_CTRL_ST 0x000010b0
+#define XGMAC_STARTBUSY BIT(31)
+#define XGMAC_WRRDN BIT(16)
+#define XGMAC_ADDR GENMASK(9, 0)
+#define XGMAC_MTL_RXP_IACC_DATA 0x000010b4
+#define XGMAC_MTL_ECC_CONTROL 0x000010c0
+#define XGMAC_MTL_SAFETY_INT_STATUS 0x000010c4
+#define XGMAC_MEUIS BIT(1)
+#define XGMAC_MECIS BIT(0)
+#define XGMAC_MTL_ECC_INT_ENABLE 0x000010c8
+#define XGMAC_RPCEIE BIT(12)
+#define XGMAC_ECEIE BIT(8)
+#define XGMAC_RXCEIE BIT(4)
+#define XGMAC_TXCEIE BIT(0)
+#define XGMAC_MTL_ECC_INT_STATUS 0x000010cc
+#define XGMAC_MTL_TXQ_OPMODE(x) (0x00001100 + (0x80 * (x)))
+#define XGMAC_TQS GENMASK(25, 16)
+#define XGMAC_TQS_SHIFT 16
+#define XGMAC_Q2TCMAP GENMASK(10, 8)
+#define XGMAC_Q2TCMAP_SHIFT 8
+#define XGMAC_TTC GENMASK(6, 4)
+#define XGMAC_TTC_SHIFT 4
+#define XGMAC_TXQEN GENMASK(3, 2)
+#define XGMAC_TXQEN_SHIFT 2
+#define XGMAC_TSF BIT(1)
+#define XGMAC_MTL_TCx_ETS_CONTROL(x) (0x00001110 + (0x80 * (x)))
+#define XGMAC_MTL_TCx_QUANTUM_WEIGHT(x) (0x00001118 + (0x80 * (x)))
+#define XGMAC_MTL_TCx_SENDSLOPE(x) (0x0000111c + (0x80 * (x)))
+#define XGMAC_MTL_TCx_HICREDIT(x) (0x00001120 + (0x80 * (x)))
+#define XGMAC_MTL_TCx_LOCREDIT(x) (0x00001124 + (0x80 * (x)))
+#define XGMAC_CC BIT(3)
+#define XGMAC_TSA GENMASK(1, 0)
+#define XGMAC_SP (0x0 << 0)
+#define XGMAC_CBS (0x1 << 0)
+#define XGMAC_ETS (0x2 << 0)
+#define XGMAC_MTL_RXQ_OPMODE(x) (0x00001140 + (0x80 * (x)))
+#define XGMAC_RQS GENMASK(25, 16)
+#define XGMAC_RQS_SHIFT 16
+#define XGMAC_EHFC BIT(7)
+#define XGMAC_RSF BIT(5)
+#define XGMAC_RTC GENMASK(1, 0)
+#define XGMAC_RTC_SHIFT 0
+#define XGMAC_MTL_RXQ_FLOW_CONTROL(x) (0x00001150 + (0x80 * (x)))
+#define XGMAC_RFD GENMASK(31, 17)
+#define XGMAC_RFD_SHIFT 17
+#define XGMAC_RFA GENMASK(15, 1)
+#define XGMAC_RFA_SHIFT 1
+#define XGMAC_MTL_QINTEN(x) (0x00001170 + (0x80 * (x)))
+#define XGMAC_RXOIE BIT(16)
+#define XGMAC_MTL_QINT_STATUS(x) (0x00001174 + (0x80 * (x)))
+#define XGMAC_RXOVFIS BIT(16)
+#define XGMAC_ABPSIS BIT(1)
+#define XGMAC_TXUNFIS BIT(0)
+#define XGMAC_MAC_REGSIZE (XGMAC_MTL_QINT_STATUS(15) / 4)
+
+/* DMA Registers */
+#define XGMAC_DMA_MODE 0x00003000
+#define XGMAC_SWR BIT(0)
+#define XGMAC_DMA_SYSBUS_MODE 0x00003004
+#define XGMAC_WR_OSR_LMT GENMASK(29, 24)
+#define XGMAC_WR_OSR_LMT_SHIFT 24
+#define XGMAC_RD_OSR_LMT GENMASK(21, 16)
+#define XGMAC_RD_OSR_LMT_SHIFT 16
+#define XGMAC_EN_LPI BIT(15)
+#define XGMAC_LPI_XIT_PKT BIT(14)
+#define XGMAC_AAL BIT(12)
+#define XGMAC_EAME BIT(11)
+#define XGMAC_BLEN GENMASK(7, 1)
+#define XGMAC_BLEN256 BIT(7)
+#define XGMAC_BLEN128 BIT(6)
+#define XGMAC_BLEN64 BIT(5)
+#define XGMAC_BLEN32 BIT(4)
+#define XGMAC_BLEN16 BIT(3)
+#define XGMAC_BLEN8 BIT(2)
+#define XGMAC_BLEN4 BIT(1)
+#define XGMAC_UNDEF BIT(0)
+#define XGMAC_TX_EDMA_CTRL 0x00003040
+#define XGMAC_TDPS GENMASK(29, 0)
+#define XGMAC_RX_EDMA_CTRL 0x00003044
+#define XGMAC_RDPS GENMASK(29, 0)
+#define XGMAC_DMA_TBS_CTRL0 0x00003054
+#define XGMAC_DMA_TBS_CTRL1 0x00003058
+#define XGMAC_DMA_TBS_CTRL2 0x0000305c
+#define XGMAC_DMA_TBS_CTRL3 0x00003060
+#define XGMAC_FTOS GENMASK(31, 8)
+#define XGMAC_FTOV BIT(0)
+#define XGMAC_DEF_FTOS (XGMAC_FTOS | XGMAC_FTOV)
+#define XGMAC_DMA_SAFETY_INT_STATUS 0x00003064
+#define XGMAC_MCSIS BIT(31)
+#define XGMAC_MSUIS BIT(29)
+#define XGMAC_MSCIS BIT(28)
+#define XGMAC_DEUIS BIT(1)
+#define XGMAC_DECIS BIT(0)
+#define XGMAC_DMA_ECC_INT_ENABLE 0x00003068
+#define XGMAC_DCEIE BIT(1)
+#define XGMAC_TCEIE BIT(0)
+#define XGMAC_DMA_ECC_INT_STATUS 0x0000306c
+#define XGMAC_DMA_CH_CONTROL(x) (0x00003100 + (0x80 * (x)))
+#define XGMAC_SPH BIT(24)
+#define XGMAC_PBLx8 BIT(16)
+#define XGMAC_DMA_CH_TX_CONTROL(x) (0x00003104 + (0x80 * (x)))
+#define XGMAC_EDSE BIT(28)
+#define XGMAC_TxPBL GENMASK(21, 16)
+#define XGMAC_TxPBL_SHIFT 16
+#define XGMAC_TSE BIT(12)
+#define XGMAC_OSP BIT(4)
+#define XGMAC_TXST BIT(0)
+#define XGMAC_DMA_CH_RX_CONTROL(x) (0x00003108 + (0x80 * (x)))
+#define XGMAC_RxPBL GENMASK(21, 16)
+#define XGMAC_RxPBL_SHIFT 16
+#define XGMAC_RBSZ GENMASK(14, 1)
+#define XGMAC_RBSZ_SHIFT 1
+#define XGMAC_RXST BIT(0)
+#define XGMAC_DMA_CH_TxDESC_HADDR(x) (0x00003110 + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_LADDR(x) (0x00003114 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_HADDR(x) (0x00003118 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_LADDR(x) (0x0000311c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_TAIL_LPTR(x) (0x00003124 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_TAIL_LPTR(x) (0x0000312c + (0x80 * (x)))
+#define XGMAC_DMA_CH_TxDESC_RING_LEN(x) (0x00003130 + (0x80 * (x)))
+#define XGMAC_DMA_CH_RxDESC_RING_LEN(x) (0x00003134 + (0x80 * (x)))
+#define XGMAC_DMA_CH_INT_EN(x) (0x00003138 + (0x80 * (x)))
+#define XGMAC_NIE BIT(15)
+#define XGMAC_AIE BIT(14)
+#define XGMAC_RBUE BIT(7)
+#define XGMAC_RIE BIT(6)
+#define XGMAC_TBUE BIT(2)
+#define XGMAC_TIE BIT(0)
+#define XGMAC_DMA_INT_DEFAULT_EN (XGMAC_NIE | XGMAC_AIE | XGMAC_RBUE | \
+ XGMAC_RIE | XGMAC_TIE)
+#define XGMAC_DMA_INT_DEFAULT_RX (XGMAC_RBUE | XGMAC_RIE)
+#define XGMAC_DMA_INT_DEFAULT_TX (XGMAC_TIE)
+#define XGMAC_DMA_CH_Rx_WATCHDOG(x) (0x0000313c + (0x80 * (x)))
+#define XGMAC_RWT GENMASK(7, 0)
+#define XGMAC_DMA_CH_STATUS(x) (0x00003160 + (0x80 * (x)))
+#define XGMAC_NIS BIT(15)
+#define XGMAC_AIS BIT(14)
+#define XGMAC_FBE BIT(12)
+#define XGMAC_RBU BIT(7)
+#define XGMAC_RI BIT(6)
+#define XGMAC_TBU BIT(2)
+#define XGMAC_TPS BIT(1)
+#define XGMAC_TI BIT(0)
+#define XGMAC_REGSIZE ((0x0000317c + (0x80 * 15)) / 4)
+
+#define XGMAC_DMA_STATUS_MSK_COMMON (XGMAC_NIS | XGMAC_AIS | XGMAC_FBE)
+#define XGMAC_DMA_STATUS_MSK_RX (XGMAC_RBU | XGMAC_RI | \
+ XGMAC_DMA_STATUS_MSK_COMMON)
+#define XGMAC_DMA_STATUS_MSK_TX (XGMAC_TBU | XGMAC_TPS | XGMAC_TI | \
+ XGMAC_DMA_STATUS_MSK_COMMON)
+
+/* Descriptors */
+#define XGMAC_TDES0_LTV BIT(31)
+#define XGMAC_TDES0_LT GENMASK(7, 0)
+#define XGMAC_TDES1_LT GENMASK(31, 8)
+#define XGMAC_TDES2_IVT GENMASK(31, 16)
+#define XGMAC_TDES2_IVT_SHIFT 16
+#define XGMAC_TDES2_IOC BIT(31)
+#define XGMAC_TDES2_TTSE BIT(30)
+#define XGMAC_TDES2_B2L GENMASK(29, 16)
+#define XGMAC_TDES2_B2L_SHIFT 16
+#define XGMAC_TDES2_VTIR GENMASK(15, 14)
+#define XGMAC_TDES2_VTIR_SHIFT 14
+#define XGMAC_TDES2_B1L GENMASK(13, 0)
+#define XGMAC_TDES3_OWN BIT(31)
+#define XGMAC_TDES3_CTXT BIT(30)
+#define XGMAC_TDES3_FD BIT(29)
+#define XGMAC_TDES3_LD BIT(28)
+#define XGMAC_TDES3_CPC GENMASK(27, 26)
+#define XGMAC_TDES3_CPC_SHIFT 26
+#define XGMAC_TDES3_TCMSSV BIT(26)
+#define XGMAC_TDES3_SAIC GENMASK(25, 23)
+#define XGMAC_TDES3_SAIC_SHIFT 23
+#define XGMAC_TDES3_TBSV BIT(24)
+#define XGMAC_TDES3_THL GENMASK(22, 19)
+#define XGMAC_TDES3_THL_SHIFT 19
+#define XGMAC_TDES3_IVTIR GENMASK(19, 18)
+#define XGMAC_TDES3_IVTIR_SHIFT 18
+#define XGMAC_TDES3_TSE BIT(18)
+#define XGMAC_TDES3_IVLTV BIT(17)
+#define XGMAC_TDES3_CIC GENMASK(17, 16)
+#define XGMAC_TDES3_CIC_SHIFT 16
+#define XGMAC_TDES3_TPL GENMASK(17, 0)
+#define XGMAC_TDES3_VLTV BIT(16)
+#define XGMAC_TDES3_VT GENMASK(15, 0)
+#define XGMAC_TDES3_FL GENMASK(14, 0)
+#define XGMAC_RDES2_HL GENMASK(9, 0)
+#define XGMAC_RDES3_OWN BIT(31)
+#define XGMAC_RDES3_CTXT BIT(30)
+#define XGMAC_RDES3_IOC BIT(30)
+#define XGMAC_RDES3_LD BIT(28)
+#define XGMAC_RDES3_CDA BIT(27)
+#define XGMAC_RDES3_RSV BIT(26)
+#define XGMAC_RDES3_L34T GENMASK(23, 20)
+#define XGMAC_RDES3_L34T_SHIFT 20
+#define XGMAC_L34T_IP4TCP 0x1
+#define XGMAC_L34T_IP4UDP 0x2
+#define XGMAC_L34T_IP6TCP 0x9
+#define XGMAC_L34T_IP6UDP 0xA
+#define XGMAC_RDES3_ES BIT(15)
+#define XGMAC_RDES3_PL GENMASK(13, 0)
+#define XGMAC_RDES3_TSD BIT(6)
+#define XGMAC_RDES3_TSA BIT(4)
+
+#endif /* __STMMAC_DWXGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
new file mode 100644
index 000000000..c2181c277
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c
@@ -0,0 +1,1649 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
+#include <linux/iopoll.h>
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+#include "dwxlgmac2.h"
+#include "dwxgmac2.h"
+
+static void dwxgmac2_core_init(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 tx, rx;
+
+ tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ tx |= XGMAC_CORE_INIT_TX;
+ rx |= XGMAC_CORE_INIT_RX;
+
+ if (hw->ps) {
+ tx |= XGMAC_CONFIG_TE;
+ tx &= ~hw->link.speed_mask;
+
+ switch (hw->ps) {
+ case SPEED_10000:
+ tx |= hw->link.xgmii.speed10000;
+ break;
+ case SPEED_2500:
+ tx |= hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ default:
+ tx |= hw->link.speed1000;
+ break;
+ }
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+ writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
+}
+
+static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
+{
+ u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
+ u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable) {
+ tx |= XGMAC_CONFIG_TE;
+ rx |= XGMAC_CONFIG_RE;
+ } else {
+ tx &= ~XGMAC_CONFIG_TE;
+ rx &= ~XGMAC_CONFIG_RE;
+ }
+
+ writel(tx, ioaddr + XGMAC_TX_CONFIG);
+ writel(rx, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ if (hw->rx_csum)
+ value |= XGMAC_CONFIG_IPC;
+ else
+ value &= ~XGMAC_CONFIG_IPC;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
+}
+
+static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XGMAC_RXQ_CTRL0);
+}
+
+static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_RXQ_CTRL2 : XGMAC_RXQ_CTRL3;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSRQ(queue);
+ value |= (prio << XGMAC_PSRQ_SHIFT(queue)) & XGMAC_PSRQ(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_PSTC(queue);
+ value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
+ u32 rx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_RAA;
+
+ switch (rx_alg) {
+ case MTL_RX_ALGORITHM_SP:
+ break;
+ case MTL_RX_ALGORITHM_WSP:
+ value |= XGMAC_RAA;
+ break;
+ default:
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+}
+
+static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
+ u32 tx_alg)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ bool ets = true;
+ u32 value;
+ int i;
+
+ value = readl(ioaddr + XGMAC_MTL_OPMODE);
+ value &= ~XGMAC_ETSALG;
+
+ switch (tx_alg) {
+ case MTL_TX_ALGORITHM_WRR:
+ value |= XGMAC_WRR;
+ break;
+ case MTL_TX_ALGORITHM_WFQ:
+ value |= XGMAC_WFQ;
+ break;
+ case MTL_TX_ALGORITHM_DWRR:
+ value |= XGMAC_DWRR;
+ break;
+ default:
+ ets = false;
+ break;
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_OPMODE);
+
+ /* Set ETS if desired */
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ value &= ~XGMAC_TSA;
+ if (ets)
+ value |= XGMAC_ETS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
+ }
+}
+
+static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
+ u32 weight, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
+}
+
+static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
+ u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, reg;
+
+ reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
+ if (queue >= 4)
+ queue -= 4;
+
+ value = readl(ioaddr + reg);
+ value &= ~XGMAC_QxMDMACH(queue);
+ value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
+
+ writel(value, ioaddr + reg);
+}
+
+static void dwxgmac2_config_cbs(struct mac_device_info *hw,
+ u32 send_slope, u32 idle_slope,
+ u32 high_credit, u32 low_credit, u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
+ writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
+ writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
+ writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
+
+ value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
+ value &= ~XGMAC_TSA;
+ value |= XGMAC_CC | XGMAC_CBS;
+ writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
+}
+
+static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int i;
+
+ for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
+static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 stat, en;
+ int ret = 0;
+
+ en = readl(ioaddr + XGMAC_INT_EN);
+ stat = readl(ioaddr + XGMAC_INT_STATUS);
+
+ stat &= en;
+
+ if (stat & XGMAC_PMTIS) {
+ x->irq_receive_pmt_irq_n++;
+ readl(ioaddr + XGMAC_PMT);
+ }
+
+ if (stat & XGMAC_LPIIS) {
+ u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ if (lpi & XGMAC_TLPIEN) {
+ ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
+ x->irq_tx_path_in_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_TLPIEX) {
+ ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
+ x->irq_tx_path_exit_lpi_mode_n++;
+ }
+ if (lpi & XGMAC_RLPIEN)
+ x->irq_rx_path_in_lpi_mode_n++;
+ if (lpi & XGMAC_RLPIEX)
+ x->irq_rx_path_exit_lpi_mode_n++;
+ }
+
+ return ret;
+}
+
+static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ int ret = 0;
+ u32 status;
+
+ status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
+ if (status & BIT(chan)) {
+ u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+
+ if (chan_status & XGMAC_RXOVFIS)
+ ret |= CORE_IRQ_MTL_RX_OVERFLOW;
+
+ writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
+ }
+
+ return ret;
+}
+
+static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time,
+ u32 tx_cnt)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 i;
+
+ if (fc & FLOW_RX)
+ writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
+ if (fc & FLOW_TX) {
+ for (i = 0; i < tx_cnt; i++) {
+ u32 value = XGMAC_TFE;
+
+ if (duplex)
+ value |= pause_time << XGMAC_PT_SHIFT;
+
+ writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
+ }
+ }
+}
+
+static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 val = 0x0;
+
+ if (mode & WAKE_MAGIC)
+ val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
+ if (mode & WAKE_UCAST)
+ val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
+ if (val) {
+ u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ cfg |= XGMAC_CONFIG_RE;
+ writel(cfg, ioaddr + XGMAC_RX_CONFIG);
+ }
+
+ writel(val, ioaddr + XGMAC_PMT);
+}
+
+static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
+ const unsigned char *addr,
+ unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (addr[5] << 8) | addr[4];
+ writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
+
+ value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
+ writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
+}
+
+static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
+ unsigned char *addr, unsigned int reg_n)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 hi_addr, lo_addr;
+
+ /* Read the MAC address from the hardware */
+ hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
+ lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
+
+ /* Extract the MAC address from the high and low words */
+ addr[0] = lo_addr & 0xff;
+ addr[1] = (lo_addr >> 8) & 0xff;
+ addr[2] = (lo_addr >> 16) & 0xff;
+ addr[3] = (lo_addr >> 24) & 0xff;
+ addr[4] = hi_addr & 0xff;
+ addr[5] = (hi_addr >> 8) & 0xff;
+}
+
+static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+
+ value |= XGMAC_LPITXEN | XGMAC_LPITXA;
+ if (en_tx_lpi_clockgating)
+ value |= XGMAC_TXCGE;
+
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_LPI_CTRL);
+ if (link)
+ value |= XGMAC_PLS;
+ else
+ value &= ~XGMAC_PLS;
+ writel(value, ioaddr + XGMAC_LPI_CTRL);
+}
+
+static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
+ writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
+}
+
+static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
+ int mcbitslog2)
+{
+ int numhashregs, regs;
+
+ switch (mcbitslog2) {
+ case 6:
+ numhashregs = 2;
+ break;
+ case 7:
+ numhashregs = 4;
+ break;
+ case 8:
+ numhashregs = 8;
+ break;
+ default:
+ return;
+ }
+
+ for (regs = 0; regs < numhashregs; regs++)
+ writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
+}
+
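+/* Worked example (illustrative): with mcbitslog2 = 6 the filter index of
+ * a multicast address is the top 6 bits of the bit-reversed CRC-32 of
+ * its six bytes,
+ *
+ * nr = bitrev32(~crc32_le(~0, addr, 6)) >> (32 - 6);
+ *
+ * so nr lies in [0, 63] and selects bit (nr & 0x1f) of hash word
+ * (nr >> 5), exactly as dwxgmac2_set_filter() computes below.
+ */
+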
+static void dwxgmac2_set_filter(struct mac_device_info *hw,
+ struct net_device *dev)
+{
+ void __iomem *ioaddr = (void __iomem *)dev->base_addr;
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ int mcbitslog2 = hw->mcast_bits_log2;
+ u32 mc_filter[8];
+ int i;
+
+ value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
+ value |= XGMAC_FILTER_HPF;
+
+ memset(mc_filter, 0, sizeof(mc_filter));
+
+ if (dev->flags & IFF_PROMISC) {
+ value |= XGMAC_FILTER_PR;
+ value |= XGMAC_FILTER_PCF;
+ } else if ((dev->flags & IFF_ALLMULTI) ||
+ (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
+ value |= XGMAC_FILTER_PM;
+
+ for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
+ writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
+ } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
+ struct netdev_hw_addr *ha;
+
+ value |= XGMAC_FILTER_HMC;
+
+ netdev_for_each_mc_addr(ha, dev) {
+ u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
+ (32 - mcbitslog2));
+ mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
+ }
+ }
+
+ dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
+
+ /* Handle multiple unicast addresses */
+ if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
+ value |= XGMAC_FILTER_PR;
+ } else {
+ struct netdev_hw_addr *ha;
+ int reg = 1;
+
+ netdev_for_each_uc_addr(ha, dev) {
+ dwxgmac2_set_umac_addr(hw, ha->addr, reg);
+ reg++;
+ }
+
+ for ( ; reg < XGMAC_ADDR_MAX; reg++) {
+ writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
+ writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
+ }
+ }
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+}
+
+static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ if (enable)
+ value |= XGMAC_CONFIG_LM;
+ else
+ value &= ~XGMAC_CONFIG_LM;
+
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
+ u32 val)
+{
+ u32 ctrl = 0;
+
+ writel(val, ioaddr + XGMAC_RSS_DATA);
+ ctrl |= idx << XGMAC_RSSIA_SHIFT;
+ ctrl |= is_key ? XGMAC_ADDRT : 0x0;
+ ctrl |= XGMAC_OB;
+ writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
+
+ return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
+ !(ctrl & XGMAC_OB), 100, 10000);
+}
+
+static int dwxgmac2_rss_configure(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value, *key;
+ int i, ret;
+
+ value = readl(ioaddr + XGMAC_RSS_CTRL);
+ if (!cfg || !cfg->enable) {
+ value &= ~XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+ }
+
+ key = (u32 *)cfg->key;
+ for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
+ ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < num_rxq; i++)
+ dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
+
+ value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
+ writel(value, ioaddr + XGMAC_RSS_CTRL);
+ return 0;
+}
+
+static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
+ __le16 perfect_match, bool is_double)
+{
+ void __iomem *ioaddr = hw->pcsr;
+
+ writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
+
+ if (hash) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ } else {
+ value &= ~XGMAC_VLAN_EDVLP;
+ value &= ~XGMAC_VLAN_ESVL;
+ value &= ~XGMAC_VLAN_DOVLTC;
+ }
+
+ value &= ~XGMAC_VLAN_VID;
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ } else if (perfect_match) {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value |= XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value &= ~XGMAC_VLAN_VTHM;
+ value |= XGMAC_VLAN_ETV;
+ if (is_double) {
+ value |= XGMAC_VLAN_EDVLP;
+ value |= XGMAC_VLAN_ESVL;
+ value |= XGMAC_VLAN_DOVLTC;
+ } else {
+ value &= ~XGMAC_VLAN_EDVLP;
+ value &= ~XGMAC_VLAN_ESVL;
+ value &= ~XGMAC_VLAN_DOVLTC;
+ }
+
+ value &= ~XGMAC_VLAN_VID;
+ writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
+ } else {
+ u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
+
+ value &= ~XGMAC_FILTER_VTFE;
+
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ value = readl(ioaddr + XGMAC_VLAN_TAG);
+
+ value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
+ value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
+ value &= ~XGMAC_VLAN_DOVLTC;
+ value &= ~XGMAC_VLAN_VID;
+
+ writel(value, ioaddr + XGMAC_VLAN_TAG);
+ }
+}
+
+struct dwxgmac3_error_desc {
+ bool valid;
+ const char *desc;
+ const char *detailed_desc;
+};
+
+#define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
+
+static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
+ const char *module_name,
+ const struct dwxgmac3_error_desc *desc,
+ unsigned long field_offset,
+ struct stmmac_safety_stats *stats)
+{
+ unsigned long loc, mask;
+ u8 *bptr = (u8 *)stats;
+ unsigned long *ptr;
+
+ ptr = (unsigned long *)(bptr + field_offset);
+
+ mask = value;
+ for_each_set_bit(loc, &mask, 32) {
+ netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
+ "correctable" : "uncorrectable", module_name,
+ desc[loc].desc, desc[loc].detailed_desc);
+
+ /* Update counters */
+ ptr[loc]++;
+ }
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32] = {
+ { true, "ATPES", "Application Transmit Interface Parity Check Error" },
+ { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
+ { true, "TPES", "TSO Data Path Parity Check Error" },
+ { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
+ { true, "MTPES", "MTL Data Path Parity Check Error" },
+ { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
+ { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
+ { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
+ { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
+ { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
+ { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
+ { true, "CWPES", "CSR Write Data Path Parity Check Error" },
+ { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
+ { true, "TTES", "TX FSM Timeout Error" },
+ { true, "RTES", "RX FSM Timeout Error" },
+ { true, "CTES", "CSR FSM Timeout Error" },
+ { true, "ATES", "APP FSM Timeout Error" },
+ { true, "PTES", "PTP FSM Timeout Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { true, "MSTTES", "Master Read/Write Timeout Error" },
+ { true, "SLVTES", "Slave Read/Write Timeout Error" },
+ { true, "ATITES", "Application Timeout on ATI Interface Error" },
+ { true, "ARITES", "Application Timeout on ARI Interface Error" },
+ { true, "FSMPES", "FSM State Parity Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { true, "CPI", "Control Register Parity Check Error" },
+};
+
+static void dwxgmac3_handle_mac_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MAC",
+ dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32] = {
+ { true, "TXCES", "MTL TX Memory Error" },
+ { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
+ { true, "TXUES", "MTL TX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "RXCES", "MTL RX Memory Error" },
+ { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
+ { true, "RXUES", "MTL RX Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { true, "ECES", "MTL EST Memory Error" },
+ { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
+ { true, "EUES", "MTL EST Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { true, "RPCES", "MTL RX Parser Memory Error" },
+ { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
+ { true, "RPUES", "MTL RX Parser Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "MTL",
+ dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
+}
+
+static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32] = {
+ { true, "TCES", "DMA TSO Memory Error" },
+ { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
+ { true, "TUES", "DMA TSO Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 3 */
+ { true, "DCES", "DMA DCACHE Memory Error" },
+ { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
+ { true, "DUES", "DMA DCACHE Memory Error" },
+ { false, "UNKNOWN", "Unknown Error" }, /* 7 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 8 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 9 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 10 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 11 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 12 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 13 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 14 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 15 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 16 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 17 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 18 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 19 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 20 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 21 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 22 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 23 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 24 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 25 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 26 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 27 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 28 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 29 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 30 */
+ { false, "UNKNOWN", "Unknown Error" }, /* 31 */
+};
+
+static void dwxgmac3_handle_dma_err(struct net_device *ndev,
+ void __iomem *ioaddr, bool correctable,
+ struct stmmac_safety_stats *stats)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
+
+ dwxgmac3_log_error(ndev, value, correctable, "DMA",
+ dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
+}
+
+static int
+dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg)
+{
+ u32 value;
+
+ if (!asp)
+ return -EINVAL;
+
+ /* 1. Enable Safety Features */
+ writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
+
+ /* 2. Enable MTL Safety Interrupts */
+ value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+ value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
+ value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
+ value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
+ value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
+
+ /* 3. Enable DMA Safety Interrupts */
+ value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+ value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
+ value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
+ writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
+
+ /* Only ECC Protection for External Memory feature is selected */
+ if (asp <= 0x1)
+ return 0;
+
+ /* 4. Enable Parity and Timeout for FSM */
+ value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
+ value |= XGMAC_PRTYEN; /* FSM Parity Feature */
+ value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
+ writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
+
+ return 0;
+}
+
+static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
+ void __iomem *ioaddr,
+ unsigned int asp,
+ struct stmmac_safety_stats *stats)
+{
+ bool err, corr;
+ u32 mtl, dma;
+ int ret = 0;
+
+ if (!asp)
+ return -EINVAL;
+
+ mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
+ dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
+
+ err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
+ corr = false;
+ if (err) {
+ dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
+ (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
+ corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
+ if (err) {
+ dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ err = dma & (XGMAC_DEUIS | XGMAC_DECIS);
+ corr = dma & XGMAC_DECIS;
+ if (err) {
+ dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
+ ret |= !corr;
+ }
+
+ return ret;
+}
+
+static const struct dwxgmac3_error {
+ const struct dwxgmac3_error_desc *desc;
+} dwxgmac3_all_errors[] = {
+ { dwxgmac3_mac_errors },
+ { dwxgmac3_mtl_errors },
+ { dwxgmac3_dma_errors },
+};
+
+static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count,
+ const char **desc)
+{
+ int module = index / 32, offset = index % 32;
+ unsigned long *ptr = (unsigned long *)stats;
+
+ if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
+ return -EINVAL;
+ if (!dwxgmac3_all_errors[module].desc[offset].valid)
+ return -EINVAL;
+ if (count)
+ *count = *(ptr + index);
+ if (desc)
+ *desc = dwxgmac3_all_errors[module].desc[offset].desc;
+ return 0;
+}
+
+static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
+{
+ u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
+
+ val &= ~XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+
+ return 0;
+}
+
+static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
+{
+ u32 val;
+
+ val = readl(ioaddr + XGMAC_MTL_OPMODE);
+ val |= XGMAC_FRPE;
+ writel(val, ioaddr + XGMAC_MTL_OPMODE);
+}
+
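+/* Write one Flexible RX Parser entry through the MTL indirect access
+ * registers: each 32-bit word of the entry is pushed via IACC_DATA and
+ * committed at the address derived from @pos, polling STARTBUSY before
+ * and after every transfer.
+ */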
+static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entry,
+ int pos)
+{
+ int ret, i;
+
+ for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
+ int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
+ u32 val;
+
+ /* Wait for ready */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+
+ /* Write data */
+ val = *((u32 *)&entry->val + i);
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
+
+ /* Write pos */
+ val = real_pos & XGMAC_ADDR;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Write OP */
+ val |= XGMAC_WRRDN;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Start Write */
+ val |= XGMAC_STARTBUSY;
+ writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
+
+ /* Wait for done */
+ ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
+ val, !(val & XGMAC_STARTBUSY), 1, 10000);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
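+/* Pick the next entry to program: the lowest-priority in-use entry at
+ * or above @curr_prio that is not yet in hardware and is neither a
+ * fragment nor the final entry (both are handled separately).
+ */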
+static struct stmmac_tc_entry *
+dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
+ unsigned int count, u32 curr_prio)
+{
+ struct stmmac_tc_entry *entry;
+ u32 min_prio = ~0x0;
+ int i, min_prio_idx;
+ bool found = false;
+
+ for (i = count - 1; i >= 0; i--) {
+ entry = &entries[i];
+
+ /* Do not update unused entries */
+ if (!entry->in_use)
+ continue;
+ /* Do not update already updated entries (i.e. fragments) */
+ if (entry->in_hw)
+ continue;
+ /* Let last entry be updated last */
+ if (entry->is_last)
+ continue;
+ /* Do not return fragments */
+ if (entry->is_frag)
+ continue;
+ /* Check if we already checked this prio */
+ if (entry->prio < curr_prio)
+ continue;
+ /* Check if this is the minimum prio */
+ if (entry->prio < min_prio) {
+ min_prio = entry->prio;
+ min_prio_idx = i;
+ found = true;
+ }
+ }
+
+ if (found)
+ return &entries[min_prio_idx];
+ return NULL;
+}
+
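+/* Reprogram the whole Flexible RX Parser table: stop RX and the parser,
+ * write the entries in priority order (fragments right after their
+ * parent entry), append the catch-all last entry, publish the
+ * valid/parsable entry counts and finally re-enable RX.
+ */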
+static int dwxgmac3_rxp_config(void __iomem *ioaddr,
+ struct stmmac_tc_entry *entries,
+ unsigned int count)
+{
+ struct stmmac_tc_entry *entry, *frag;
+ int i, ret, nve = 0;
+ u32 curr_prio = 0;
+ u32 old_val, val;
+
+ /* Force disable RX */
+ old_val = readl(ioaddr + XGMAC_RX_CONFIG);
+ val = old_val & ~XGMAC_CONFIG_RE;
+ writel(val, ioaddr + XGMAC_RX_CONFIG);
+
+ /* Disable RX Parser */
+ ret = dwxgmac3_rxp_disable(ioaddr);
+ if (ret)
+ goto re_enable;
+
+ /* Set all entries as NOT in HW */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ entry->in_hw = false;
+ }
+
+ /* Update entries in reverse order */
+ while (1) {
+ entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
+ if (!entry)
+ break;
+
+ curr_prio = entry->prio;
+ frag = entry->frag_ptr;
+
+ /* Set special fragment requirements */
+ if (frag) {
+ entry->val.af = 0;
+ entry->val.rf = 0;
+ entry->val.nc = 1;
+ entry->val.ok_index = nve + 2;
+ }
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ entry->in_hw = true;
+
+ if (frag && !frag->in_hw) {
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
+ if (ret)
+ goto re_enable;
+ frag->table_pos = nve++;
+ frag->in_hw = true;
+ }
+ }
+
+ if (!nve)
+ goto re_enable;
+
+ /* Update the catch-all ("accept all") last entry */
+ for (i = 0; i < count; i++) {
+ entry = &entries[i];
+ if (!entry->is_last)
+ continue;
+
+ ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
+ if (ret)
+ goto re_enable;
+
+ entry->table_pos = nve++;
+ }
+
+ /* Assume n. of parsable entries == n. of valid entries */
+ val = (nve << 16) & XGMAC_NPE;
+ val |= nve & XGMAC_NVE;
+ writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
+
+ /* Enable RX Parser */
+ dwxgmac3_rxp_enable(ioaddr);
+
+re_enable:
+ /* Re-enable RX */
+ writel(old_val, ioaddr + XGMAC_RX_CONFIG);
+ return ret;
+}
+
+static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
+ value, value & XGMAC_TXTSC, 100, 10000))
+ return -EBUSY;
+
+ *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
+ *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
+ return 0;
+}
+
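+/* Configure one flexible PPS output. When digital (decimal) rollover is
+ * disabled, the target nanoseconds are rescaled since the subsecond
+ * register then ticks in ~0.465ns units. The interval is expressed in
+ * sub_second_inc units and the pulse width is set to half the interval
+ * for a roughly 50% duty cycle.
+ */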
+static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags)
+{
+ u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+ u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
+ u64 period;
+
+ if (!cfg->available)
+ return -EINVAL;
+ if (tnsec & XGMAC_TRGTBUSY0)
+ return -EBUSY;
+ if (!sub_second_inc || !systime_flags)
+ return -EINVAL;
+
+ val &= ~XGMAC_PPSx_MASK(index);
+
+ if (!enable) {
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+ }
+
+ val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
+ val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
+
+ /* XGMAC Core has 4 PPS outputs at most.
+ *
+ * Prior to XGMAC Core 3.20, Fixed mode or Flexible mode is selectable
+ * for PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default
+ * and cannot be switched to Fixed mode, since PPSEN{1,2,3} are
+ * read-only reserved to 0. But we always set PPSEN{1,2,3} anyway, as
+ * it does not make things worse ;-)
+ *
+ * From XGMAC Core 3.20 onwards, PPSEN{0,1,2,3} are writable and must
+ * be set, or the PPS outputs stay in Fixed PPS mode by default.
+ */
+ val |= XGMAC_PPSENx(index);
+
+ writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
+
+ if (!(systime_flags & PTP_TCR_TSCTRLSSR))
+ cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
+ writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
+
+ period = cfg->period.tv_sec * 1000000000;
+ period += cfg->period.tv_nsec;
+
+ do_div(period, sub_second_inc);
+
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
+
+ period >>= 1;
+ if (period <= 1)
+ return -EINVAL;
+
+ writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
+
+ /* Finally, activate it */
+ writel(val, ioaddr + XGMAC_PPS_CONTROL);
+ return 0;
+}
+
+static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
+{
+ u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_SARC;
+ value |= val << XGMAC_CONFIG_SARC_SHIFT;
+
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_VLAN_INCL);
+ value |= XGMAC_VLAN_VLTI;
+ value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
+ value &= ~XGMAC_VLAN_VLC;
+ value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
+ writel(value, ioaddr + XGMAC_VLAN_INCL);
+}
+
+static int dwxgmac2_filter_wait(struct mac_device_info *hw)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
+ !(value & XGMAC_XB), 100, 10000))
+ return -EBUSY;
+ return 0;
+}
+
+static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
+ u8 reg, u32 *data)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ ret = dwxgmac2_filter_wait(hw);
+ if (ret)
+ return ret;
+
+ value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ value |= XGMAC_TT | XGMAC_XB;
+ writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
+
+ ret = dwxgmac2_filter_wait(hw);
+ if (ret)
+ return ret;
+
+ *data = readl(ioaddr + XGMAC_L3L4_DATA);
+ return 0;
+}
+
+static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
+ u8 reg, u32 data)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ ret = dwxgmac2_filter_wait(hw);
+ if (ret)
+ return ret;
+
+ writel(data, ioaddr + XGMAC_L3L4_DATA);
+
+ value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
+ value |= XGMAC_XB;
+ writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
+
+ return dwxgmac2_filter_wait(hw);
+}
+
+static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ value |= XGMAC_FILTER_IPFE;
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
+ if (ret)
+ return ret;
+
+ /* For IPv6, the SA and DA filters cannot both be active */
+ if (ipv6) {
+ value |= XGMAC_L3PEN0;
+ value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
+ value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
+ if (sa) {
+ value |= XGMAC_L3SAM0;
+ if (inv)
+ value |= XGMAC_L3SAIM0;
+ } else {
+ value |= XGMAC_L3DAM0;
+ if (inv)
+ value |= XGMAC_L3DAIM0;
+ }
+ } else {
+ value &= ~XGMAC_L3PEN0;
+ if (sa) {
+ value |= XGMAC_L3SAM0;
+ if (inv)
+ value |= XGMAC_L3SAIM0;
+ } else {
+ value |= XGMAC_L3DAM0;
+ if (inv)
+ value |= XGMAC_L3DAIM0;
+ }
+ }
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
+ if (ret)
+ return ret;
+
+ if (sa) {
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
+ if (ret)
+ return ret;
+ } else {
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
+ if (ret)
+ return ret;
+ }
+
+ if (!en)
+ return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
+
+ return 0;
+}
+
+static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+ int ret;
+
+ value = readl(ioaddr + XGMAC_PACKET_FILTER);
+ value |= XGMAC_FILTER_IPFE;
+ writel(value, ioaddr + XGMAC_PACKET_FILTER);
+
+ ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
+ if (ret)
+ return ret;
+
+ if (udp)
+ value |= XGMAC_L4PEN0;
+ else
+ value &= ~XGMAC_L4PEN0;
+
+ value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
+ value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
+ if (sa) {
+ value |= XGMAC_L4SPM0;
+ if (inv)
+ value |= XGMAC_L4SPIM0;
+ } else {
+ value |= XGMAC_L4DPM0;
+ if (inv)
+ value |= XGMAC_L4DPIM0;
+ }
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
+ if (ret)
+ return ret;
+
+ if (sa) {
+ value = match & XGMAC_L4SP0;
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
+ if (ret)
+ return ret;
+ } else {
+ value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
+
+ ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
+ if (ret)
+ return ret;
+ }
+
+ if (!en)
+ return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
+
+ return 0;
+}
+
+static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
+ u32 addr)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ writel(addr, ioaddr + XGMAC_ARP_ADDR);
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ if (en)
+ value |= XGMAC_CONFIG_ARPEN;
+ else
+ value &= ~XGMAC_CONFIG_ARPEN;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
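+/* Write one EST word through the GCL indirect access interface: GCRR
+ * selects between the EST control registers and the gate control list
+ * itself, while SRWO starts the transfer and self-clears on completion.
+ */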
+static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
+{
+ u32 ctrl;
+
+ writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
+
+ ctrl = (reg << XGMAC_ADDR_SHIFT);
+ ctrl |= gcl ? 0 : XGMAC_GCRR;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ ctrl |= XGMAC_SRWO;
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
+
+ return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
+ ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
+}
+
+static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate)
+{
+ int i, ret = 0;
+ u32 ctrl;
+
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
+ ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < cfg->gcl_size; i++) {
+ ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
+ if (ret)
+ return ret;
+ }
+
+ ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
+ ctrl &= ~XGMAC_PTOV;
+ ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
+ if (cfg->enable)
+ ctrl |= XGMAC_EEST | XGMAC_SSWL;
+ else
+ ctrl &= ~XGMAC_EEST;
+
+ writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
+ return 0;
+}
+
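+/* Enable/disable Frame Preemption. When enabling, the RQ field of
+ * RXQ_CTRL1 is presumably pointed at the highest RX queue so that
+ * preemption residue lands there before EFPE is set.
+ */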
+static void dwxgmac3_fpe_configure(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq,
+ u32 num_rxq, bool enable)
+{
+ u32 value;
+
+ if (!enable) {
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+
+ value &= ~XGMAC_EFPE;
+
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+ return;
+ }
+
+ value = readl(ioaddr + XGMAC_RXQ_CTRL1);
+ value &= ~XGMAC_RQ;
+ value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
+ writel(value, ioaddr + XGMAC_RXQ_CTRL1);
+
+ value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
+ value |= XGMAC_EFPE;
+ writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
+}
+
+const struct stmmac_ops dwxgmac210_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = dwxgmac2_config_cbs,
+ .dump_regs = dwxgmac2_dump_regs,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
+ .set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
+ .config_l3_filter = dwxgmac2_config_l3_filter,
+ .config_l4_filter = dwxgmac2_config_l4_filter,
+ .set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
+};
+
+static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
+ u32 queue)
+{
+ void __iomem *ioaddr = hw->pcsr;
+ u32 value;
+
+ value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
+ if (mode == MTL_QUEUE_AVB)
+ value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
+ else if (mode == MTL_QUEUE_DCB)
+ value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
+ writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
+}
+
+const struct stmmac_ops dwxlgmac2_ops = {
+ .core_init = dwxgmac2_core_init,
+ .set_mac = dwxgmac2_set_mac,
+ .rx_ipc = dwxgmac2_rx_ipc,
+ .rx_queue_enable = dwxlgmac2_rx_queue_enable,
+ .rx_queue_prio = dwxgmac2_rx_queue_prio,
+ .tx_queue_prio = dwxgmac2_tx_queue_prio,
+ .rx_queue_routing = NULL,
+ .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
+ .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
+ .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
+ .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
+ .config_cbs = dwxgmac2_config_cbs,
+ .dump_regs = dwxgmac2_dump_regs,
+ .host_irq_status = dwxgmac2_host_irq_status,
+ .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
+ .flow_ctrl = dwxgmac2_flow_ctrl,
+ .pmt = dwxgmac2_pmt,
+ .set_umac_addr = dwxgmac2_set_umac_addr,
+ .get_umac_addr = dwxgmac2_get_umac_addr,
+ .set_eee_mode = dwxgmac2_set_eee_mode,
+ .reset_eee_mode = dwxgmac2_reset_eee_mode,
+ .set_eee_timer = dwxgmac2_set_eee_timer,
+ .set_eee_pls = dwxgmac2_set_eee_pls,
+ .pcs_ctrl_ane = NULL,
+ .pcs_rane = NULL,
+ .pcs_get_adv_lp = NULL,
+ .debug = NULL,
+ .set_filter = dwxgmac2_set_filter,
+ .safety_feat_config = dwxgmac3_safety_feat_config,
+ .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
+ .safety_feat_dump = dwxgmac3_safety_feat_dump,
+ .set_mac_loopback = dwxgmac2_set_mac_loopback,
+ .rss_configure = dwxgmac2_rss_configure,
+ .update_vlan_hash = dwxgmac2_update_vlan_hash,
+ .rxp_config = dwxgmac3_rxp_config,
+ .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
+ .flex_pps_config = dwxgmac2_flex_pps_config,
+ .sarc_configure = dwxgmac2_sarc_configure,
+ .enable_vlan = dwxgmac2_enable_vlan,
+ .config_l3_filter = dwxgmac2_config_l3_filter,
+ .config_l4_filter = dwxgmac2_config_l4_filter,
+ .set_arp_offload = dwxgmac2_set_arp_offload,
+ .est_configure = dwxgmac3_est_configure,
+ .fpe_configure = dwxgmac3_fpe_configure,
+};
+
+int dwxgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXGMAC2\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
+ mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
+ mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
+ mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
+ mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
+ mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
+ mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
+ mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
+
+int dwxlgmac2_setup(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ dev_info(priv->device, "\tXLGMAC\n");
+
+ priv->dev->priv_flags |= IFF_UNICAST_FLT;
+ mac->pcsr = priv->ioaddr;
+ mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
+ mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
+ mac->mcast_bits_log2 = 0;
+
+ if (mac->multicast_filter_bins)
+ mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
+ mac->link.duplex = 0;
+ mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
+ mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
+ mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
+ mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
+ mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
+ mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
+ mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
+ mac->link.speed_mask = XLGMAC_CONFIG_SS;
+
+ mac->mii.addr = XGMAC_MDIO_ADDR;
+ mac->mii.data = XGMAC_MDIO_DATA;
+ mac->mii.addr_shift = 16;
+ mac->mii.addr_mask = GENMASK(20, 16);
+ mac->mii.reg_shift = 0;
+ mac->mii.reg_mask = GENMASK(15, 0);
+ mac->mii.clk_csr_shift = 19;
+ mac->mii.clk_csr_mask = GENMASK(21, 19);
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
new file mode 100644
index 000000000..b1f0c3984
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_descs.c
@@ -0,0 +1,373 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+ int ret = tx_done;
+
+ if (unlikely(tdes3 & XGMAC_TDES3_OWN))
+ return tx_dma_own;
+ if (likely(!(tdes3 & XGMAC_TDES3_LD)))
+ return tx_not_ls;
+
+ return ret;
+}
+
+static int dwxgmac2_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+
+ if (unlikely(rdes3 & XGMAC_RDES3_OWN))
+ return dma_own;
+ if (unlikely(rdes3 & XGMAC_RDES3_CTXT))
+ return discard_frame;
+ if (likely(!(rdes3 & XGMAC_RDES3_LD)))
+ return rx_not_ls;
+ if (unlikely((rdes3 & XGMAC_RDES3_ES) && (rdes3 & XGMAC_RDES3_LD)))
+ return discard_frame;
+
+ return good_frame;
+}
+
+static int dwxgmac2_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des2) & XGMAC_TDES2_B1L);
+}
+
+static int dwxgmac2_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_TDES3_OWN) > 0;
+}
+
+static void dwxgmac2_set_tx_owner(struct dma_desc *p)
+{
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_OWN);
+}
+
+static void dwxgmac2_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_OWN);
+
+ if (!disable_rx_ic)
+ p->des3 |= cpu_to_le32(XGMAC_RDES3_IOC);
+}
+
+static int dwxgmac2_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_LD) > 0;
+}
+
+static int dwxgmac2_get_rx_frame_len(struct dma_desc *p, int rx_coe)
+{
+ return (le32_to_cpu(p->des3) & XGMAC_RDES3_PL);
+}
+
+static void dwxgmac2_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_TTSE);
+}
+
+static int dwxgmac2_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return 0; /* Not supported */
+}
+
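+/* The RX timestamp arrives in a context descriptor: DES0 carries the
+ * nanoseconds part and DES1 the seconds, combined here into a single
+ * 64-bit nanosecond value.
+ */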
+static inline void dwxgmac2_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns = 0;
+
+ ns += le32_to_cpu(p->des1) * 1000000000ULL;
+ ns += le32_to_cpu(p->des0);
+
+ *ts = ns;
+}
+
+static int dwxgmac2_rx_check_timestamp(void *desc)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ bool desc_valid, ts_valid;
+
+ dma_rmb();
+
+ desc_valid = !(rdes3 & XGMAC_RDES3_OWN) && (rdes3 & XGMAC_RDES3_CTXT);
+ ts_valid = !(rdes3 & XGMAC_RDES3_TSD) && (rdes3 & XGMAC_RDES3_TSA);
+
+ if (likely(desc_valid && ts_valid)) {
+ if ((p->des0 == 0xffffffff) && (p->des1 == 0xffffffff))
+ return -EINVAL;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int dwxgmac2_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ int ret = -EBUSY;
+
+ if (likely(rdes3 & XGMAC_RDES3_CDA))
+ ret = dwxgmac2_rx_check_timestamp(next_desc);
+
+ return !ret;
+}
+
+static void dwxgmac2_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end, int bfsize)
+{
+ dwxgmac2_set_rx_owner(p, disable_rx_ic);
+}
+
+static void dwxgmac2_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ p->des2 |= cpu_to_le32(len & XGMAC_TDES2_B1L);
+
+ tdes3 |= tot_pkt_len & XGMAC_TDES3_FL;
+ if (is_fs)
+ tdes3 |= XGMAC_TDES3_FD;
+ else
+ tdes3 &= ~XGMAC_TDES3_FD;
+
+ if (csum_flag)
+ tdes3 |= 0x3 << XGMAC_TDES3_CIC_SHIFT;
+ else
+ tdes3 &= ~XGMAC_TDES3_CIC;
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up first, to
+ * avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_prepare_tso_tx_desc(struct dma_desc *p, int is_fs,
+ int len1, int len2, bool tx_own,
+ bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen)
+{
+ unsigned int tdes3 = le32_to_cpu(p->des3);
+
+ if (len1)
+ p->des2 |= cpu_to_le32(len1 & XGMAC_TDES2_B1L);
+ if (len2)
+ p->des2 |= cpu_to_le32((len2 << XGMAC_TDES2_B2L_SHIFT) &
+ XGMAC_TDES2_B2L);
+ if (is_fs) {
+ tdes3 |= XGMAC_TDES3_FD | XGMAC_TDES3_TSE;
+ tdes3 |= (tcphdrlen << XGMAC_TDES3_THL_SHIFT) &
+ XGMAC_TDES3_THL;
+ tdes3 |= tcppayloadlen & XGMAC_TDES3_TPL;
+ } else {
+ tdes3 &= ~XGMAC_TDES3_FD;
+ }
+
+ if (ls)
+ tdes3 |= XGMAC_TDES3_LD;
+ else
+ tdes3 &= ~XGMAC_TDES3_LD;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes3 |= XGMAC_TDES3_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up first, to
+ * avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des3 = cpu_to_le32(tdes3);
+}
+
+static void dwxgmac2_release_tx_desc(struct dma_desc *p, int mode)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static void dwxgmac2_set_tx_ic(struct dma_desc *p)
+{
+ p->des2 |= cpu_to_le32(XGMAC_TDES2_IOC);
+}
+
+static void dwxgmac2_set_mss(struct dma_desc *p, unsigned int mss)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = cpu_to_le32(mss);
+ p->des3 = cpu_to_le32(XGMAC_TDES3_CTXT | XGMAC_TDES3_TCMSSV);
+}
+
+static void dwxgmac2_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des0 = cpu_to_le32(lower_32_bits(addr));
+ p->des1 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwxgmac2_clear(struct dma_desc *p)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+}
+
+static int dwxgmac2_get_rx_hash(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type)
+{
+ unsigned int rdes3 = le32_to_cpu(p->des3);
+ u32 ptype;
+
+ if (rdes3 & XGMAC_RDES3_RSV) {
+ ptype = (rdes3 & XGMAC_RDES3_L34T) >> XGMAC_RDES3_L34T_SHIFT;
+
+ switch (ptype) {
+ case XGMAC_L34T_IP4TCP:
+ case XGMAC_L34T_IP4UDP:
+ case XGMAC_L34T_IP6TCP:
+ case XGMAC_L34T_IP6UDP:
+ *type = PKT_HASH_TYPE_L4;
+ break;
+ default:
+ *type = PKT_HASH_TYPE_L3;
+ break;
+ }
+
+ *hash = le32_to_cpu(p->des1);
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static void dwxgmac2_get_rx_header_len(struct dma_desc *p, unsigned int *len)
+{
+ if (le32_to_cpu(p->des3) & XGMAC_RDES3_L34T)
+ *len = le32_to_cpu(p->des2) & XGMAC_RDES2_HL;
+}
+
+static void dwxgmac2_set_sec_addr(struct dma_desc *p, dma_addr_t addr, bool is_valid)
+{
+ p->des2 = cpu_to_le32(lower_32_bits(addr));
+ p->des3 = cpu_to_le32(upper_32_bits(addr));
+}
+
+static void dwxgmac2_set_sarc(struct dma_desc *p, u32 sarc_type)
+{
+ sarc_type <<= XGMAC_TDES3_SAIC_SHIFT;
+
+ p->des3 |= cpu_to_le32(sarc_type & XGMAC_TDES3_SAIC);
+}
+
+static void dwxgmac2_set_vlan_tag(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type)
+{
+ p->des0 = 0;
+ p->des1 = 0;
+ p->des2 = 0;
+ p->des3 = 0;
+
+ /* Inner VLAN */
+ if (inner_type) {
+ u32 des = inner_tag << XGMAC_TDES2_IVT_SHIFT;
+
+ des &= XGMAC_TDES2_IVT;
+ p->des2 = cpu_to_le32(des);
+
+ des = inner_type << XGMAC_TDES3_IVTIR_SHIFT;
+ des &= XGMAC_TDES3_IVTIR;
+ p->des3 = cpu_to_le32(des | XGMAC_TDES3_IVLTV);
+ }
+
+ /* Outer VLAN */
+ p->des3 |= cpu_to_le32(tag & XGMAC_TDES3_VT);
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_VLTV);
+
+ p->des3 |= cpu_to_le32(XGMAC_TDES3_CTXT);
+}
+
+static void dwxgmac2_set_vlan(struct dma_desc *p, u32 type)
+{
+ type <<= XGMAC_TDES2_VTIR_SHIFT;
+ p->des2 |= cpu_to_le32(type & XGMAC_TDES2_VTIR);
+}
+
+static void dwxgmac2_set_tbs(struct dma_edesc *p, u32 sec, u32 nsec)
+{
+ p->des4 = cpu_to_le32((sec & XGMAC_TDES0_LT) | XGMAC_TDES0_LTV);
+ p->des5 = cpu_to_le32(nsec & XGMAC_TDES1_LT);
+ p->des6 = 0;
+ p->des7 = 0;
+}
+
+const struct stmmac_desc_ops dwxgmac210_desc_ops = {
+ .tx_status = dwxgmac2_get_tx_status,
+ .rx_status = dwxgmac2_get_rx_status,
+ .get_tx_len = dwxgmac2_get_tx_len,
+ .get_tx_owner = dwxgmac2_get_tx_owner,
+ .set_tx_owner = dwxgmac2_set_tx_owner,
+ .set_rx_owner = dwxgmac2_set_rx_owner,
+ .get_tx_ls = dwxgmac2_get_tx_ls,
+ .get_rx_frame_len = dwxgmac2_get_rx_frame_len,
+ .enable_tx_timestamp = dwxgmac2_enable_tx_timestamp,
+ .get_tx_timestamp_status = dwxgmac2_get_tx_timestamp_status,
+ .get_rx_timestamp_status = dwxgmac2_get_rx_timestamp_status,
+ .get_timestamp = dwxgmac2_get_timestamp,
+ .set_tx_ic = dwxgmac2_set_tx_ic,
+ .prepare_tx_desc = dwxgmac2_prepare_tx_desc,
+ .prepare_tso_tx_desc = dwxgmac2_prepare_tso_tx_desc,
+ .release_tx_desc = dwxgmac2_release_tx_desc,
+ .init_rx_desc = dwxgmac2_init_rx_desc,
+ .init_tx_desc = dwxgmac2_init_tx_desc,
+ .set_mss = dwxgmac2_set_mss,
+ .set_addr = dwxgmac2_set_addr,
+ .clear = dwxgmac2_clear,
+ .get_rx_hash = dwxgmac2_get_rx_hash,
+ .get_rx_header_len = dwxgmac2_get_rx_header_len,
+ .set_sec_addr = dwxgmac2_set_sec_addr,
+ .set_sarc = dwxgmac2_set_sarc,
+ .set_vlan_tag = dwxgmac2_set_vlan_tag,
+ .set_vlan = dwxgmac2_set_vlan,
+ .set_tbs = dwxgmac2_set_tbs,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
new file mode 100644
index 000000000..5e98355f4
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c
@@ -0,0 +1,582 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac XGMAC support.
+ */
+
+#include <linux/iopoll.h>
+#include "stmmac.h"
+#include "dwxgmac2.h"
+
+static int dwxgmac2_dma_reset(void __iomem *ioaddr)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_MODE);
+
+ /* DMA SW reset */
+ writel(value | XGMAC_SWR, ioaddr + XGMAC_DMA_MODE);
+
+ return readl_poll_timeout(ioaddr + XGMAC_DMA_MODE, value,
+ !(value & XGMAC_SWR), 0, 100000);
+}
+
+static void dwxgmac2_dma_init(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, int atds)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+
+ if (dma_cfg->aal)
+ value |= XGMAC_AAL;
+
+ if (dma_cfg->eame)
+ value |= XGMAC_EAME;
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+}
+
+static void dwxgmac2_dma_init_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+
+ if (dma_cfg->pblx8)
+ value |= XGMAC_PBLx8;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ writel(XGMAC_DMA_INT_DEFAULT_EN, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_init_rx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t phy, u32 chan)
+{
+ u32 rxpbl = dma_cfg->rxpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RxPBL;
+ value |= (rxpbl << XGMAC_RxPBL_SHIFT) & XGMAC_RxPBL;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_HADDR(chan));
+ writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_RxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_init_tx_chan(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t phy, u32 chan)
+{
+ u32 txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TxPBL;
+ value |= (txpbl << XGMAC_TxPBL_SHIFT) & XGMAC_TxPBL;
+ value |= XGMAC_OSP;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ writel(upper_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_HADDR(chan));
+ writel(lower_32_bits(phy), ioaddr + XGMAC_DMA_CH_TxDESC_LADDR(chan));
+}
+
+static void dwxgmac2_dma_axi(void __iomem *ioaddr, struct stmmac_axi *axi)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ int i;
+
+ if (axi->axi_lpi_en)
+ value |= XGMAC_EN_LPI;
+ if (axi->axi_xit_frm)
+ value |= XGMAC_LPI_XIT_PKT;
+
+ value &= ~XGMAC_WR_OSR_LMT;
+ value |= (axi->axi_wr_osr_lmt << XGMAC_WR_OSR_LMT_SHIFT) &
+ XGMAC_WR_OSR_LMT;
+
+ value &= ~XGMAC_RD_OSR_LMT;
+ value |= (axi->axi_rd_osr_lmt << XGMAC_RD_OSR_LMT_SHIFT) &
+ XGMAC_RD_OSR_LMT;
+
+ if (!axi->axi_fb)
+ value |= XGMAC_UNDEF;
+
+ value &= ~XGMAC_BLEN;
+ for (i = 0; i < AXI_BLEN; i++) {
+ switch (axi->axi_blen[i]) {
+ case 256:
+ value |= XGMAC_BLEN256;
+ break;
+ case 128:
+ value |= XGMAC_BLEN128;
+ break;
+ case 64:
+ value |= XGMAC_BLEN64;
+ break;
+ case 32:
+ value |= XGMAC_BLEN32;
+ break;
+ case 16:
+ value |= XGMAC_BLEN16;
+ break;
+ case 8:
+ value |= XGMAC_BLEN8;
+ break;
+ case 4:
+ value |= XGMAC_BLEN4;
+ break;
+ }
+ }
+
+ writel(value, ioaddr + XGMAC_DMA_SYSBUS_MODE);
+ writel(XGMAC_TDPS, ioaddr + XGMAC_TX_EDMA_CTRL);
+ writel(XGMAC_RDPS, ioaddr + XGMAC_RX_EDMA_CTRL);
+}
+
+static void dwxgmac2_dma_dump_regs(void __iomem *ioaddr, u32 *reg_space)
+{
+ int i;
+
+ for (i = (XGMAC_DMA_MODE / 4); i < XGMAC_REGSIZE; i++)
+ reg_space[i] = readl(ioaddr + i * 4);
+}
+
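+/* The MTL RX/TX queue sizes (RQS/TQS) below are programmed in units of
+ * 256-byte blocks minus one, hence the fifosz / 256 - 1 conversion.
+ */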
+static void dwxgmac2_dma_rx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+ unsigned int rqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_RSF;
+ } else {
+ value &= ~XGMAC_RSF;
+ value &= ~XGMAC_RTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_RTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_RTC_SHIFT;
+ else
+ value |= 0x3 << XGMAC_RTC_SHIFT;
+ }
+
+ value &= ~XGMAC_RQS;
+ value |= (rqs << XGMAC_RQS_SHIFT) & XGMAC_RQS;
+
+ if ((fifosz >= 4096) && (qmode != MTL_QUEUE_AVB)) {
+ u32 flow = readl(ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ unsigned int rfd, rfa;
+
+ value |= XGMAC_EHFC;
+
+ /* Set Threshold for Activating Flow Control to min 2 frames,
+ * i.e. 1500 * 2 = 3000 bytes.
+ *
+ * Set Threshold for Deactivating Flow Control to min 1 frame,
+ * i.e. 1500 bytes.
+ */
+ switch (fifosz) {
+ case 4096:
+ /* These thresholds violate the above formula because of
+ * the FIFO size limit, so an overflow may still occur in
+ * spite of flow control.
+ */
+ rfd = 0x03; /* Full-2.5K */
+ rfa = 0x01; /* Full-1.5K */
+ break;
+
+ default:
+ rfd = 0x07; /* Full-4.5K */
+ rfa = 0x04; /* Full-3K */
+ break;
+ }
+
+ flow &= ~XGMAC_RFD;
+ flow |= rfd << XGMAC_RFD_SHIFT;
+
+ flow &= ~XGMAC_RFA;
+ flow |= rfa << XGMAC_RFA_SHIFT;
+
+ writel(flow, ioaddr + XGMAC_MTL_RXQ_FLOW_CONTROL(channel));
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_RXQ_OPMODE(channel));
+
+ /* Enable MTL RX overflow */
+ value = readl(ioaddr + XGMAC_MTL_QINTEN(channel));
+ writel(value | XGMAC_RXOIE, ioaddr + XGMAC_MTL_QINTEN(channel));
+}
+
+static void dwxgmac2_dma_tx_mode(void __iomem *ioaddr, int mode,
+ u32 channel, int fifosz, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+ unsigned int tqs = fifosz / 256 - 1;
+
+ if (mode == SF_DMA_MODE) {
+ value |= XGMAC_TSF;
+ } else {
+ value &= ~XGMAC_TSF;
+ value &= ~XGMAC_TTC;
+
+ if (mode <= 64)
+ value |= 0x0 << XGMAC_TTC_SHIFT;
+ else if (mode <= 96)
+ value |= 0x2 << XGMAC_TTC_SHIFT;
+ else if (mode <= 128)
+ value |= 0x3 << XGMAC_TTC_SHIFT;
+ else if (mode <= 192)
+ value |= 0x4 << XGMAC_TTC_SHIFT;
+ else if (mode <= 256)
+ value |= 0x5 << XGMAC_TTC_SHIFT;
+ else if (mode <= 384)
+ value |= 0x6 << XGMAC_TTC_SHIFT;
+ else
+ value |= 0x7 << XGMAC_TTC_SHIFT;
+ }
+
+ /* Use static TC to Queue mapping */
+ value |= (channel << XGMAC_Q2TCMAP_SHIFT) & XGMAC_Q2TCMAP;
+
+ value &= ~XGMAC_TXQEN;
+ if (qmode != MTL_QUEUE_AVB)
+ value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ else
+ value |= 0x1 << XGMAC_TXQEN_SHIFT;
+
+ value &= ~XGMAC_TQS;
+ value |= (tqs << XGMAC_TQS_SHIFT) & XGMAC_TQS;
+
+ writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+}
+
+static void dwxgmac2_enable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value |= XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value |= XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_disable_dma_irq(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+
+ if (rx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_RX;
+ if (tx)
+ value &= ~XGMAC_DMA_INT_DEFAULT_TX;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+}
+
+static void dwxgmac2_dma_start_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value |= XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value |= XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_tx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+ value &= ~XGMAC_TXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_TX_CONFIG);
+ value &= ~XGMAC_CONFIG_TE;
+ writel(value, ioaddr + XGMAC_TX_CONFIG);
+}
+
+static void dwxgmac2_dma_start_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value |= XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_RX_CONFIG);
+ value |= XGMAC_CONFIG_RE;
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+}
+
+static void dwxgmac2_dma_stop_rx(void __iomem *ioaddr, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RXST;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+}
+
+static int dwxgmac2_dma_interrupt(void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan,
+ u32 dir)
+{
+ u32 intr_status = readl(ioaddr + XGMAC_DMA_CH_STATUS(chan));
+ u32 intr_en = readl(ioaddr + XGMAC_DMA_CH_INT_EN(chan));
+ int ret = 0;
+
+ if (dir == DMA_DIR_RX)
+ intr_status &= XGMAC_DMA_STATUS_MSK_RX;
+ else if (dir == DMA_DIR_TX)
+ intr_status &= XGMAC_DMA_STATUS_MSK_TX;
+
+ /* ABNORMAL interrupts */
+ if (unlikely(intr_status & XGMAC_AIS)) {
+ if (unlikely(intr_status & XGMAC_RBU)) {
+ x->rx_buf_unav_irq++;
+ ret |= handle_rx;
+ }
+ if (unlikely(intr_status & XGMAC_TPS)) {
+ x->tx_process_stopped_irq++;
+ ret |= tx_hard_error;
+ }
+ if (unlikely(intr_status & XGMAC_FBE)) {
+ x->fatal_bus_error_irq++;
+ ret |= tx_hard_error;
+ }
+ }
+
+ /* TX/RX NORMAL interrupts */
+ if (likely(intr_status & XGMAC_NIS)) {
+ x->normal_irq_n++;
+
+ if (likely(intr_status & XGMAC_RI)) {
+ x->rx_normal_irq_n++;
+ ret |= handle_rx;
+ }
+ if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) {
+ x->tx_normal_irq_n++;
+ ret |= handle_tx;
+ }
+ }
+
+ /* Clear interrupts */
+ writel(intr_en & intr_status, ioaddr + XGMAC_DMA_CH_STATUS(chan));
+
+ return ret;
+}
+
+static int dwxgmac2_get_hw_feature(void __iomem *ioaddr,
+ struct dma_features *dma_cap)
+{
+ u32 hw_cap;
+
+ /* MAC HW feature 0 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE0);
+ dma_cap->vlins = (hw_cap & XGMAC_HWFEAT_SAVLANINS) >> 27;
+ dma_cap->rx_coe = (hw_cap & XGMAC_HWFEAT_RXCOESEL) >> 16;
+ dma_cap->tx_coe = (hw_cap & XGMAC_HWFEAT_TXCOESEL) >> 14;
+ dma_cap->eee = (hw_cap & XGMAC_HWFEAT_EEESEL) >> 13;
+ dma_cap->atime_stamp = (hw_cap & XGMAC_HWFEAT_TSSEL) >> 12;
+ dma_cap->av = (hw_cap & XGMAC_HWFEAT_AVSEL) >> 11;
+ dma_cap->av &= !((hw_cap & XGMAC_HWFEAT_RAVSEL) >> 10);
+ dma_cap->arpoffsel = (hw_cap & XGMAC_HWFEAT_ARPOFFSEL) >> 9;
+ dma_cap->rmon = (hw_cap & XGMAC_HWFEAT_MMCSEL) >> 8;
+ dma_cap->pmt_magic_frame = (hw_cap & XGMAC_HWFEAT_MGKSEL) >> 7;
+ dma_cap->pmt_remote_wake_up = (hw_cap & XGMAC_HWFEAT_RWKSEL) >> 6;
+ dma_cap->vlhash = (hw_cap & XGMAC_HWFEAT_VLHASH) >> 4;
+ dma_cap->mbps_1000 = (hw_cap & XGMAC_HWFEAT_GMIISEL) >> 1;
+
+ /* MAC HW feature 1 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE1);
+ dma_cap->l3l4fnum = (hw_cap & XGMAC_HWFEAT_L3L4FNUM) >> 27;
+ dma_cap->hash_tb_sz = (hw_cap & XGMAC_HWFEAT_HASHTBLSZ) >> 24;
+ dma_cap->rssen = (hw_cap & XGMAC_HWFEAT_RSSEN) >> 20;
+ dma_cap->tsoen = (hw_cap & XGMAC_HWFEAT_TSOEN) >> 18;
+ dma_cap->sphen = (hw_cap & XGMAC_HWFEAT_SPHEN) >> 17;
+
+ dma_cap->addr64 = (hw_cap & XGMAC_HWFEAT_ADDR64) >> 14;
+ switch (dma_cap->addr64) {
+ case 0:
+ dma_cap->addr64 = 32;
+ break;
+ case 1:
+ dma_cap->addr64 = 40;
+ break;
+ case 2:
+ dma_cap->addr64 = 48;
+ break;
+ default:
+ dma_cap->addr64 = 32;
+ break;
+ }
+
+ dma_cap->tx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_TXFIFOSIZE) >> 6);
+ dma_cap->rx_fifo_size =
+ 128 << ((hw_cap & XGMAC_HWFEAT_RXFIFOSIZE) >> 0);
+
+ /* MAC HW feature 2 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE2);
+ dma_cap->pps_out_num = (hw_cap & XGMAC_HWFEAT_PPSOUTNUM) >> 24;
+ dma_cap->number_tx_channel =
+ ((hw_cap & XGMAC_HWFEAT_TXCHCNT) >> 18) + 1;
+ dma_cap->number_rx_channel =
+ ((hw_cap & XGMAC_HWFEAT_RXCHCNT) >> 12) + 1;
+ dma_cap->number_tx_queues =
+ ((hw_cap & XGMAC_HWFEAT_TXQCNT) >> 6) + 1;
+ dma_cap->number_rx_queues =
+ ((hw_cap & XGMAC_HWFEAT_RXQCNT) >> 0) + 1;
+
+ /* MAC HW feature 3 */
+ hw_cap = readl(ioaddr + XGMAC_HW_FEATURE3);
+ dma_cap->tbssel = (hw_cap & XGMAC_HWFEAT_TBSSEL) >> 27;
+ dma_cap->fpesel = (hw_cap & XGMAC_HWFEAT_FPESEL) >> 26;
+ dma_cap->estwid = (hw_cap & XGMAC_HWFEAT_ESTWID) >> 23;
+ dma_cap->estdep = (hw_cap & XGMAC_HWFEAT_ESTDEP) >> 20;
+ dma_cap->estsel = (hw_cap & XGMAC_HWFEAT_ESTSEL) >> 19;
+ dma_cap->asp = (hw_cap & XGMAC_HWFEAT_ASP) >> 14;
+ dma_cap->dvlan = (hw_cap & XGMAC_HWFEAT_DVLAN) >> 13;
+ dma_cap->frpes = (hw_cap & XGMAC_HWFEAT_FRPES) >> 11;
+ dma_cap->frpbs = (hw_cap & XGMAC_HWFEAT_FRPPB) >> 9;
+ dma_cap->frpsel = (hw_cap & XGMAC_HWFEAT_FRPSEL) >> 3;
+
+ return 0;
+}
+
+static void dwxgmac2_rx_watchdog(void __iomem *ioaddr, u32 riwt, u32 queue)
+{
+ writel(riwt & XGMAC_RWT, ioaddr + XGMAC_DMA_CH_Rx_WATCHDOG(queue));
+}
+
+static void dwxgmac2_set_rx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_RxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_tx_ring_len(void __iomem *ioaddr, u32 len, u32 chan)
+{
+ writel(len, ioaddr + XGMAC_DMA_CH_TxDESC_RING_LEN(chan));
+}
+
+static void dwxgmac2_set_rx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_RxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_set_tx_tail_ptr(void __iomem *ioaddr, u32 ptr, u32 chan)
+{
+ writel(ptr, ioaddr + XGMAC_DMA_CH_TxDESC_TAIL_LPTR(chan));
+}
+
+static void dwxgmac2_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_TSE;
+ else
+ value &= ~XGMAC_TSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+}
+
+static void dwxgmac2_qmode(void __iomem *ioaddr, u32 channel, u8 qmode)
+{
+ u32 value = readl(ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+ u32 flow = readl(ioaddr + XGMAC_RX_FLOW_CTRL);
+
+ value &= ~XGMAC_TXQEN;
+ if (qmode != MTL_QUEUE_AVB) {
+ value |= 0x2 << XGMAC_TXQEN_SHIFT;
+ writel(0, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(channel));
+ } else {
+ value |= 0x1 << XGMAC_TXQEN_SHIFT;
+ writel(flow & (~XGMAC_RFE), ioaddr + XGMAC_RX_FLOW_CTRL);
+ }
+
+ writel(value, ioaddr + XGMAC_MTL_TXQ_OPMODE(channel));
+}
+
+static void dwxgmac2_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value;
+
+ value = readl(ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+ value &= ~XGMAC_RBSZ;
+ value |= bfsize << XGMAC_RBSZ_SHIFT;
+ writel(value, ioaddr + XGMAC_DMA_CH_RX_CONTROL(chan));
+}
+
+static void dwxgmac2_enable_sph(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
+
+ value &= ~XGMAC_CONFIG_HDSMS;
+ value |= XGMAC_CONFIG_HDSMS_256; /* Segment max 256 bytes */
+ writel(value, ioaddr + XGMAC_RX_CONFIG);
+
+ value = readl(ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+ if (en)
+ value |= XGMAC_SPH;
+ else
+ value &= ~XGMAC_SPH;
+ writel(value, ioaddr + XGMAC_DMA_CH_CONTROL(chan));
+}
+
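+/* Enable Time Based Scheduling on a TX channel. EDSE is read back to
+ * detect cores that lack TBS support, and the TBS control registers are
+ * left at the driver default, presumably the fetch time offset encoded
+ * by XGMAC_DEF_FTOS.
+ */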
+static int dwxgmac2_enable_tbs(void __iomem *ioaddr, bool en, u32 chan)
+{
+ u32 value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ if (en)
+ value |= XGMAC_EDSE;
+ else
+ value &= ~XGMAC_EDSE;
+
+ writel(value, ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan));
+
+ value = readl(ioaddr + XGMAC_DMA_CH_TX_CONTROL(chan)) & XGMAC_EDSE;
+ if (en && !value)
+ return -EIO;
+
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL0);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL1);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL2);
+ writel(XGMAC_DEF_FTOS, ioaddr + XGMAC_DMA_TBS_CTRL3);
+ return 0;
+}
+
+const struct stmmac_dma_ops dwxgmac210_dma_ops = {
+ .reset = dwxgmac2_dma_reset,
+ .init = dwxgmac2_dma_init,
+ .init_chan = dwxgmac2_dma_init_chan,
+ .init_rx_chan = dwxgmac2_dma_init_rx_chan,
+ .init_tx_chan = dwxgmac2_dma_init_tx_chan,
+ .axi = dwxgmac2_dma_axi,
+ .dump_regs = dwxgmac2_dma_dump_regs,
+ .dma_rx_mode = dwxgmac2_dma_rx_mode,
+ .dma_tx_mode = dwxgmac2_dma_tx_mode,
+ .enable_dma_irq = dwxgmac2_enable_dma_irq,
+ .disable_dma_irq = dwxgmac2_disable_dma_irq,
+ .start_tx = dwxgmac2_dma_start_tx,
+ .stop_tx = dwxgmac2_dma_stop_tx,
+ .start_rx = dwxgmac2_dma_start_rx,
+ .stop_rx = dwxgmac2_dma_stop_rx,
+ .dma_interrupt = dwxgmac2_dma_interrupt,
+ .get_hw_feature = dwxgmac2_get_hw_feature,
+ .rx_watchdog = dwxgmac2_rx_watchdog,
+ .set_rx_ring_len = dwxgmac2_set_rx_ring_len,
+ .set_tx_ring_len = dwxgmac2_set_tx_ring_len,
+ .set_rx_tail_ptr = dwxgmac2_set_rx_tail_ptr,
+ .set_tx_tail_ptr = dwxgmac2_set_tx_tail_ptr,
+ .enable_tso = dwxgmac2_enable_tso,
+ .qmode = dwxgmac2_qmode,
+ .set_bfsize = dwxgmac2_set_bfsize,
+ .enable_sph = dwxgmac2_enable_sph,
+ .enable_tbs = dwxgmac2_enable_tbs,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h b/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h
new file mode 100644
index 000000000..726090d49
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwxlgmac2.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2020 Synopsys, Inc. and/or its affiliates.
+ * Synopsys DesignWare XLGMAC definitions.
+ */
+
+#ifndef __STMMAC_DWXLGMAC2_H__
+#define __STMMAC_DWXLGMAC2_H__
+
+/* MAC Registers */
+#define XLGMAC_CONFIG_SS GENMASK(30, 28)
+#define XLGMAC_CONFIG_SS_SHIFT 28
+#define XLGMAC_CONFIG_SS_40G (0x0 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_25G (0x1 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_50G (0x2 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_100G (0x3 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_10G (0x4 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_2500 (0x6 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_CONFIG_SS_1000 (0x7 << XLGMAC_CONFIG_SS_SHIFT)
+#define XLGMAC_RXQ_ENABLE_CTRL0 0x00000140
+
+#endif /* __STMMAC_DWXLGMAC2_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
new file mode 100644
index 000000000..1bcbbd724
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -0,0 +1,475 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This contains the functions to handle the enhanced descriptors.
+
+ Copyright (C) 2007-2014 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "descs_com.h"
+
+static int enh_desc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes0 = le32_to_cpu(p->des0);
+ int ret = tx_done;
+
+ /* Get tx owner first */
+ if (unlikely(tdes0 & ETDES0_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes0 & ETDES0_LAST_SEGMENT)))
+ return tx_not_ls;
+
+ if (unlikely(tdes0 & ETDES0_ERROR_SUMMARY)) {
+ if (unlikely(tdes0 & ETDES0_JABBER_TIMEOUT))
+ x->tx_jabber++;
+
+ if (unlikely(tdes0 & ETDES0_FRAME_FLUSHED)) {
+ x->tx_frame_flushed++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ if (unlikely(tdes0 & ETDES0_LOSS_CARRIER)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(tdes0 & ETDES0_NO_CARRIER)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((tdes0 & ETDES0_LATE_COLLISION) ||
+ (tdes0 & ETDES0_EXCESSIVE_COLLISIONS)))
+ stats->collisions +=
+ (tdes0 & ETDES0_COLLISION_COUNT_MASK) >> 3;
+
+ if (unlikely(tdes0 & ETDES0_EXCESSIVE_DEFERRAL))
+ x->tx_deferred++;
+
+ if (unlikely(tdes0 & ETDES0_UNDERFLOW_ERROR)) {
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ x->tx_underflow++;
+ }
+
+ if (unlikely(tdes0 & ETDES0_IP_HEADER_ERROR))
+ x->tx_ip_header_error++;
+
+ if (unlikely(tdes0 & ETDES0_PAYLOAD_ERROR)) {
+ x->tx_payload_error++;
+ dwmac_dma_flush_tx_fifo(ioaddr);
+ }
+
+ ret = tx_err;
+ }
+
+ if (unlikely(tdes0 & ETDES0_DEFERRED))
+ x->tx_deferred++;
+
+#ifdef STMMAC_VLAN_TAG_USED
+ if (tdes0 & ETDES0_VLAN_FRAME)
+ x->tx_vlan++;
+#endif
+
+ return ret;
+}
+
+static int enh_desc_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des1) & ETDES1_BUFFER1_SIZE_MASK);
+}
+
+static int enh_desc_coe_rdes0(int ipc_err, int type, int payload_err)
+{
+ int ret = good_frame;
+ u32 status = (type << 2 | ipc_err << 1 | payload_err) & 0x7;
+
+ /* bits 5 7 0 | Frame status
+ * ----------------------------------------------------------
+ * 0 0 0 | IEEE 802.3 Type frame (length < 1536 octets)
+ * 1 0 0 | IPv4/6 no checksum errors
+ * 1 0 1 | IPv4/6 checksum payload error
+ * 1 1 0 | IPv4/6 checksum IP header error
+ * 1 1 1 | IPv4/6 IP payload and header errors
+ * 0 0 1 | IPv4/6 unsupported IP payload
+ * 0 1 1 | COE bypassed - no IPv4/6 frame
+ * 0 1 0 | Reserved.
+ */
+ if (status == 0x0)
+ ret = llc_snap;
+ else if (status == 0x4)
+ ret = good_frame;
+ else if (status == 0x5)
+ ret = csum_none;
+ else if (status == 0x6)
+ ret = csum_none;
+ else if (status == 0x7)
+ ret = csum_none;
+ else if (status == 0x1)
+ ret = discard_frame;
+ else if (status == 0x3)
+ ret = discard_frame;
+ return ret;
+}
+
+static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p)
+{
+ unsigned int rdes0 = le32_to_cpu(p->basic.des0);
+ unsigned int rdes4 = le32_to_cpu(p->des4);
+
+ if (unlikely(rdes0 & ERDES0_RX_MAC_ADDR)) {
+ int message_type = (rdes4 & ERDES4_MSG_TYPE_MASK) >> 8;
+
+ if (rdes4 & ERDES4_IP_HDR_ERR)
+ x->ip_hdr_err++;
+ if (rdes4 & ERDES4_IP_PAYLOAD_ERR)
+ x->ip_payload_err++;
+ if (rdes4 & ERDES4_IP_CSUM_BYPASSED)
+ x->ip_csum_bypassed++;
+ if (rdes4 & ERDES4_IPV4_PKT_RCVD)
+ x->ipv4_pkt_rcvd++;
+ if (rdes4 & ERDES4_IPV6_PKT_RCVD)
+ x->ipv6_pkt_rcvd++;
+
+ if (message_type == RDES_EXT_NO_PTP)
+ x->no_ptp_rx_msg_type_ext++;
+ else if (message_type == RDES_EXT_SYNC)
+ x->ptp_rx_msg_type_sync++;
+ else if (message_type == RDES_EXT_FOLLOW_UP)
+ x->ptp_rx_msg_type_follow_up++;
+ else if (message_type == RDES_EXT_DELAY_REQ)
+ x->ptp_rx_msg_type_delay_req++;
+ else if (message_type == RDES_EXT_DELAY_RESP)
+ x->ptp_rx_msg_type_delay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_REQ)
+ x->ptp_rx_msg_type_pdelay_req++;
+ else if (message_type == RDES_EXT_PDELAY_RESP)
+ x->ptp_rx_msg_type_pdelay_resp++;
+ else if (message_type == RDES_EXT_PDELAY_FOLLOW_UP)
+ x->ptp_rx_msg_type_pdelay_follow_up++;
+ else if (message_type == RDES_PTP_ANNOUNCE)
+ x->ptp_rx_msg_type_announce++;
+ else if (message_type == RDES_PTP_MANAGEMENT)
+ x->ptp_rx_msg_type_management++;
+ else if (message_type == RDES_PTP_PKT_RESERVED_TYPE)
+ x->ptp_rx_msg_pkt_reserved_type++;
+
+ if (rdes4 & ERDES4_PTP_FRAME_TYPE)
+ x->ptp_frame_type++;
+ if (rdes4 & ERDES4_PTP_VER)
+ x->ptp_ver++;
+ if (rdes4 & ERDES4_TIMESTAMP_DROPPED)
+ x->timestamp_dropped++;
+ if (rdes4 & ERDES4_AV_PKT_RCVD)
+ x->av_pkt_rcvd++;
+ if (rdes4 & ERDES4_AV_TAGGED_PKT_RCVD)
+ x->av_tagged_pkt_rcvd++;
+ if ((rdes4 & ERDES4_VLAN_TAG_PRI_VAL_MASK) >> 18)
+ x->vlan_tag_priority_val++;
+ if (rdes4 & ERDES4_L3_FILTER_MATCH)
+ x->l3_filter_match++;
+ if (rdes4 & ERDES4_L4_FILTER_MATCH)
+ x->l4_filter_match++;
+ if ((rdes4 & ERDES4_L3_L4_FILT_NO_MATCH_MASK) >> 26)
+ x->l3_l4_filter_no_match++;
+ }
+}
+
+static int enh_desc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ int ret = good_frame;
+
+ if (unlikely(rdes0 & RDES0_OWN))
+ return dma_own;
+
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+ if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR)) {
+ x->rx_desc++;
+ stats->rx_length_errors++;
+ }
+ if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
+ x->rx_gmac_overflow++;
+
+ if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
+ pr_err("\tIPC Csum Error/Giant frame\n");
+
+ if (unlikely(rdes0 & RDES0_COLLISION))
+ stats->collisions++;
+ if (unlikely(rdes0 & RDES0_RECEIVE_WATCHDOG))
+ x->rx_watchdog++;
+
+ if (unlikely(rdes0 & RDES0_MII_ERROR)) /* GMII */
+ x->rx_mii++;
+
+ if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
+ x->rx_crc_errors++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+
+ /* After a payload csum error, the ES bit is set, which does not
+ * match the information reported in the databook. At any rate, we
+ * need to understand whether the HW csum computation is OK and
+ * report this info to the upper layers.
+ */
+ if (likely(ret == good_frame))
+ ret = enh_desc_coe_rdes0(!!(rdes0 & RDES0_IPC_CSUM_ERROR),
+ !!(rdes0 & RDES0_FRAME_TYPE),
+ !!(rdes0 & ERDES0_RX_MAC_ADDR));
+
+ if (unlikely(rdes0 & RDES0_DRIBBLING))
+ x->dribbling_bit++;
+
+ if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL)) {
+ x->sa_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes0 & RDES0_DA_FILTER_FAIL)) {
+ x->da_rx_filter_fail++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (rdes0 & RDES0_VLAN_TAG)
+ x->rx_vlan++;
+#endif
+
+ return ret;
+}
+
+static void enh_desc_init_rx_desc(struct dma_desc *p, int disable_rx_ic,
+ int mode, int end, int bfsize)
+{
+ int bfsize1;
+
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+
+ bfsize1 = min(bfsize, BUF_SIZE_8KiB);
+ p->des1 |= cpu_to_le32(bfsize1 & ERDES1_BUFFER1_SIZE_MASK);
+
+ if (mode == STMMAC_CHAIN_MODE)
+ ehn_desc_rx_set_on_chain(p);
+ else
+ ehn_desc_rx_set_on_ring(p, end, bfsize);
+
+ if (disable_rx_ic)
+ p->des1 |= cpu_to_le32(ERDES1_DISABLE_IC);
+}
+
+static void enh_desc_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 &= cpu_to_le32(~ETDES0_OWN);
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_desc_end_tx_desc_on_chain(p);
+ else
+ enh_desc_end_tx_desc_on_ring(p, end);
+}
+
+static int enh_desc_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & ETDES0_OWN) >> 31;
+}
+
+static void enh_desc_set_tx_owner(struct dma_desc *p)
+{
+ p->des0 |= cpu_to_le32(ETDES0_OWN);
+}
+
+static void enh_desc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+}
+
+static int enh_desc_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & ETDES0_LAST_SEGMENT) >> 29;
+}
+
+static void enh_desc_release_tx_desc(struct dma_desc *p, int mode)
+{
+ int ter = (le32_to_cpu(p->des0) & ETDES0_END_RING) >> 21;
+
+ memset(p, 0, offsetof(struct dma_desc, des2));
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_desc_end_tx_desc_on_chain(p);
+ else
+ enh_desc_end_tx_desc_on_ring(p, ter);
+}
+
+static void enh_desc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes0 = le32_to_cpu(p->des0);
+
+ if (mode == STMMAC_CHAIN_MODE)
+ enh_set_tx_desc_len_on_chain(p, len);
+ else
+ enh_set_tx_desc_len_on_ring(p, len);
+
+ if (is_fs)
+ tdes0 |= ETDES0_FIRST_SEGMENT;
+ else
+ tdes0 &= ~ETDES0_FIRST_SEGMENT;
+
+ if (likely(csum_flag))
+ tdes0 |= (TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
+ else
+ tdes0 &= ~(TX_CIC_FULL << ETDES0_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes0 |= ETDES0_LAST_SEGMENT;
+
+ /* Finally set the OWN bit. Later the DMA will start! */
+ if (tx_own)
+ tdes0 |= ETDES0_OWN;
+
+ if (is_fs && tx_own)
+ /* When the own bit has to be set for the first frame, all
+ * descriptors for the same frame have to be set up first, to
+ * avoid a race condition.
+ */
+ dma_wmb();
+
+ p->des0 = cpu_to_le32(tdes0);
+}
+
+static void enh_desc_set_tx_ic(struct dma_desc *p)
+{
+ p->des0 |= cpu_to_le32(ETDES0_INTERRUPT);
+}
+
+static int enh_desc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
+{
+ unsigned int csum = 0;
+ /* The type-1 checksum offload engines append the checksum at
+ * the end of frame and the two bytes of checksum are added in
+ * the length.
+ * Adjust for that in the framelen for type-1 checksum offload
+ * engines.
+ */
+ if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+ csum = 2;
+
+ return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
+ >> RDES0_FRAME_LEN_SHIFT) - csum);
+}
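+
+/* Worked example (illustrative): a type-1 COE engine appends the 2-byte
+ * checksum to the frame, so a 64-byte frame is reported as 66 in the
+ * RDES0 frame length field and the helper above returns 66 - 2 = 64.
+ */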
+
+static void enh_desc_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des0 |= cpu_to_le32(ETDES0_TIME_STAMP_ENABLE);
+}
+
+static int enh_desc_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & ETDES0_TIME_STAMP_STATUS) >> 17;
+}
+
+static void enh_desc_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ u64 ns;
+
+ if (ats) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+ ns = le32_to_cpu(p->des6);
+ /* convert high/sec time stamp value to nanoseconds */
+ ns += le32_to_cpu(p->des7) * 1000000000ULL;
+ } else {
+ struct dma_desc *p = (struct dma_desc *)desc;
+ ns = le32_to_cpu(p->des2);
+ ns += le32_to_cpu(p->des3) * 1000000000ULL;
+ }
+
+ *ts = ns;
+}
+
+static int enh_desc_get_rx_timestamp_status(void *desc, void *next_desc,
+ u32 ats)
+{
+ if (ats) {
+ struct dma_extended_desc *p = (struct dma_extended_desc *)desc;
+ return (le32_to_cpu(p->basic.des0) & RDES0_IPC_CSUM_ERROR) >> 7;
+ } else {
+ struct dma_desc *p = (struct dma_desc *)desc;
+ if ((le32_to_cpu(p->des2) == 0xffffffff) &&
+ (le32_to_cpu(p->des3) == 0xffffffff))
+ /* timestamp is corrupted, hence don't store it */
+ return 0;
+ else
+ return 1;
+ }
+}
+
+static void enh_desc_display_ring(void *head, unsigned int size, bool rx,
+ dma_addr_t dma_rx_phy, unsigned int desc_size)
+{
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ dma_addr_t dma_addr;
+ int i;
+
+ pr_info("Extended %s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+ dma_addr = dma_rx_phy + i * sizeof(*ep);
+
+ x = *(u64 *)ep;
+ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ (unsigned int)x, (unsigned int)(x >> 32),
+ ep->basic.des2, ep->basic.des3);
+ ep++;
+ }
+ pr_info("\n");
+}
+
+static void enh_desc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void enh_desc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
+const struct stmmac_desc_ops enh_desc_ops = {
+ .tx_status = enh_desc_get_tx_status,
+ .rx_status = enh_desc_get_rx_status,
+ .get_tx_len = enh_desc_get_tx_len,
+ .init_rx_desc = enh_desc_init_rx_desc,
+ .init_tx_desc = enh_desc_init_tx_desc,
+ .get_tx_owner = enh_desc_get_tx_owner,
+ .release_tx_desc = enh_desc_release_tx_desc,
+ .prepare_tx_desc = enh_desc_prepare_tx_desc,
+ .set_tx_ic = enh_desc_set_tx_ic,
+ .get_tx_ls = enh_desc_get_tx_ls,
+ .set_tx_owner = enh_desc_set_tx_owner,
+ .set_rx_owner = enh_desc_set_rx_owner,
+ .get_rx_frame_len = enh_desc_get_rx_frame_len,
+ .rx_extended_status = enh_desc_get_ext_status,
+ .enable_tx_timestamp = enh_desc_enable_tx_timestamp,
+ .get_tx_timestamp_status = enh_desc_get_tx_timestamp_status,
+ .get_timestamp = enh_desc_get_timestamp,
+ .get_rx_timestamp_status = enh_desc_get_rx_timestamp_status,
+ .display_ring = enh_desc_display_ring,
+ .set_addr = enh_desc_set_addr,
+ .clear = enh_desc_clear,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.c b/drivers/net/ethernet/stmicro/stmmac/hwif.c
new file mode 100644
index 000000000..bb7114f97
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac HW Interface Handling
+ */
+
+#include "common.h"
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+
+static u32 stmmac_get_id(struct stmmac_priv *priv, u32 id_reg)
+{
+ u32 reg = readl(priv->ioaddr + id_reg);
+
+ if (!reg) {
+ dev_info(priv->device, "Version ID not available\n");
+ return 0x0;
+ }
+
+ dev_info(priv->device, "User ID: 0x%x, Synopsys ID: 0x%x\n",
+ (unsigned int)(reg & GENMASK(15, 8)) >> 8,
+ (unsigned int)(reg & GENMASK(7, 0)));
+ return reg & GENMASK(7, 0);
+}
+
+static u32 stmmac_get_dev_id(struct stmmac_priv *priv, u32 id_reg)
+{
+ u32 reg = readl(priv->ioaddr + id_reg);
+
+ if (!reg) {
+ dev_info(priv->device, "Version ID not available\n");
+ return 0x0;
+ }
+
+ return (reg & GENMASK(15, 8)) >> 8;
+}
+
+static void stmmac_dwmac_mode_quirk(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ if (priv->chain_mode) {
+ dev_info(priv->device, "Chain mode enabled\n");
+ priv->mode = STMMAC_CHAIN_MODE;
+ mac->mode = &chain_mode_ops;
+ } else {
+ dev_info(priv->device, "Ring mode enabled\n");
+ priv->mode = STMMAC_RING_MODE;
+ mac->mode = &ring_mode_ops;
+ }
+}
+
+static int stmmac_dwmac1_quirks(struct stmmac_priv *priv)
+{
+ struct mac_device_info *mac = priv->hw;
+
+ if (priv->plat->enh_desc) {
+ dev_info(priv->device, "Enhanced/Alternate descriptors\n");
+
+ /* GMAC older than 3.50 has no extended descriptors */
+ if (priv->synopsys_id >= DWMAC_CORE_3_50) {
+ dev_info(priv->device, "Enabled extended descriptors\n");
+ priv->extend_desc = 1;
+ } else {
+ dev_warn(priv->device, "Extended descriptors not supported\n");
+ }
+
+ mac->desc = &enh_desc_ops;
+ } else {
+ dev_info(priv->device, "Normal descriptors\n");
+ mac->desc = &ndesc_ops;
+ }
+
+ stmmac_dwmac_mode_quirk(priv);
+ return 0;
+}
+
+static int stmmac_dwmac4_quirks(struct stmmac_priv *priv)
+{
+ stmmac_dwmac_mode_quirk(priv);
+ return 0;
+}
+
+static int stmmac_dwxlgmac_quirks(struct stmmac_priv *priv)
+{
+ priv->hw->xlgmac = true;
+ return 0;
+}
+
+static const struct stmmac_hwif_entry {
+ bool gmac;
+ bool gmac4;
+ bool xgmac;
+ u32 min_id;
+ u32 dev_id;
+ const struct stmmac_regs_off regs;
+ const void *desc;
+ const void *dma;
+ const void *mac;
+ const void *hwtimestamp;
+ const void *mode;
+ const void *tc;
+ const void *mmc;
+ int (*setup)(struct stmmac_priv *priv);
+ int (*quirks)(struct stmmac_priv *priv);
+} stmmac_hw[] = {
+ /* NOTE: New HW versions shall go to the end of this table */
+ {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
+ .desc = NULL,
+ .dma = &dwmac100_dma_ops,
+ .mac = &dwmac100_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .mmc = &dwmac_mmc_ops,
+ .setup = dwmac100_setup,
+ .quirks = stmmac_dwmac1_quirks,
+ }, {
+ .gmac = true,
+ .gmac4 = false,
+ .xgmac = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC3_X_OFFSET,
+ .mmc_off = MMC_GMAC3_X_OFFSET,
+ },
+ .desc = NULL,
+ .dma = &dwmac1000_dma_ops,
+ .mac = &dwmac1000_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = NULL,
+ .mmc = &dwmac_mmc_ops,
+ .setup = dwmac1000_setup,
+ .quirks = stmmac_dwmac1_quirks,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .xgmac = false,
+ .min_id = 0,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac4_dma_ops,
+ .mac = &dwmac4_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
+ .setup = dwmac4_setup,
+ .quirks = stmmac_dwmac4_quirks,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .xgmac = false,
+ .min_id = DWMAC_CORE_4_00,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac4_dma_ops,
+ .mac = &dwmac410_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .xgmac = false,
+ .min_id = DWMAC_CORE_4_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac410_dma_ops,
+ .mac = &dwmac410_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = true,
+ .xgmac = false,
+ .min_id = DWMAC_CORE_5_10,
+ .regs = {
+ .ptp_off = PTP_GMAC4_OFFSET,
+ .mmc_off = MMC_GMAC4_OFFSET,
+ },
+ .desc = &dwmac4_desc_ops,
+ .dma = &dwmac410_dma_ops,
+ .mac = &dwmac510_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = &dwmac4_ring_mode_ops,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwmac_mmc_ops,
+ .setup = dwmac4_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXGMAC_CORE_2_10,
+ .dev_id = DWXGMAC_ID,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = MMC_XGMAC_OFFSET,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxgmac210_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwxgmac_mmc_ops,
+ .setup = dwxgmac2_setup,
+ .quirks = NULL,
+ }, {
+ .gmac = false,
+ .gmac4 = false,
+ .xgmac = true,
+ .min_id = DWXLGMAC_CORE_2_00,
+ .dev_id = DWXLGMAC_ID,
+ .regs = {
+ .ptp_off = PTP_XGMAC_OFFSET,
+ .mmc_off = MMC_XGMAC_OFFSET,
+ },
+ .desc = &dwxgmac210_desc_ops,
+ .dma = &dwxgmac210_dma_ops,
+ .mac = &dwxlgmac2_ops,
+ .hwtimestamp = &stmmac_ptp,
+ .mode = NULL,
+ .tc = &dwmac510_tc_ops,
+ .mmc = &dwxgmac_mmc_ops,
+ .setup = dwxlgmac2_setup,
+ .quirks = stmmac_dwxlgmac_quirks,
+ },
+};
+
+int stmmac_hwif_init(struct stmmac_priv *priv)
+{
+ bool needs_xgmac = priv->plat->has_xgmac;
+ bool needs_gmac4 = priv->plat->has_gmac4;
+ bool needs_gmac = priv->plat->has_gmac;
+ const struct stmmac_hwif_entry *entry;
+ struct mac_device_info *mac;
+ bool needs_setup = true;
+ u32 id, dev_id = 0;
+ int i, ret;
+
+ if (needs_gmac) {
+ id = stmmac_get_id(priv, GMAC_VERSION);
+ } else if (needs_gmac4 || needs_xgmac) {
+ id = stmmac_get_id(priv, GMAC4_VERSION);
+ if (needs_xgmac)
+ dev_id = stmmac_get_dev_id(priv, GMAC4_VERSION);
+ } else {
+ id = 0;
+ }
+
+ /* Save ID for later use */
+ priv->synopsys_id = id;
+
+ /* Let's assume some safe values first */
+ priv->ptpaddr = priv->ioaddr +
+ (needs_gmac4 ? PTP_GMAC4_OFFSET : PTP_GMAC3_X_OFFSET);
+ priv->mmcaddr = priv->ioaddr +
+ (needs_gmac4 ? MMC_GMAC4_OFFSET : MMC_GMAC3_X_OFFSET);
+
+ /* Check for HW specific setup first */
+ if (priv->plat->setup) {
+ mac = priv->plat->setup(priv);
+ needs_setup = false;
+ } else {
+ mac = devm_kzalloc(priv->device, sizeof(*mac), GFP_KERNEL);
+ }
+
+ if (!mac)
+ return -ENOMEM;
+
+ /* Fallback to generic HW */
+ for (i = ARRAY_SIZE(stmmac_hw) - 1; i >= 0; i--) {
+ entry = &stmmac_hw[i];
+
+ if (needs_gmac ^ entry->gmac)
+ continue;
+ if (needs_gmac4 ^ entry->gmac4)
+ continue;
+ if (needs_xgmac ^ entry->xgmac)
+ continue;
+ /* Use synopsys_id var because some setups can override this */
+ if (priv->synopsys_id < entry->min_id)
+ continue;
+ if (needs_xgmac && (dev_id ^ entry->dev_id))
+ continue;
+
+ /* Only use generic HW helpers if needed */
+ mac->desc = mac->desc ? : entry->desc;
+ mac->dma = mac->dma ? : entry->dma;
+ mac->mac = mac->mac ? : entry->mac;
+ mac->ptp = mac->ptp ? : entry->hwtimestamp;
+ mac->mode = mac->mode ? : entry->mode;
+ mac->tc = mac->tc ? : entry->tc;
+ mac->mmc = mac->mmc ? : entry->mmc;
+
+ priv->hw = mac;
+ priv->ptpaddr = priv->ioaddr + entry->regs.ptp_off;
+ priv->mmcaddr = priv->ioaddr + entry->regs.mmc_off;
+
+ /* Entry found */
+ if (needs_setup) {
+ ret = entry->setup(priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Save quirks, if any, for later use */
+ priv->hwif_quirks = entry->quirks;
+ return 0;
+ }
+
+ dev_err(priv->device, "Failed to find HW IF (id=0x%x, gmac=%d/%d)\n",
+ id, needs_gmac, needs_gmac4);
+ return -EINVAL;
+}
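+
+/* Example walk-through (illustrative; register values assumed): on a
+ * dwmac 4.10 platform has_gmac4 is set and stmmac_get_id() reads back
+ * a Synopsys ID of 0x41, so the backwards walk over stmmac_hw[] skips
+ * the XLGMAC/XGMAC and 5.10 entries and stops at the entry with
+ * min_id == DWMAC_CORE_4_10, selecting dwmac410_ops and
+ * dwmac410_dma_ops.
+ */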
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
new file mode 100644
index 000000000..b2b9cf04b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -0,0 +1,642 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+// Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+// stmmac HW Interface Callbacks
+
+#ifndef __STMMAC_HWIF_H__
+#define __STMMAC_HWIF_H__
+
+#include <linux/netdevice.h>
+#include <linux/stmmac.h>
+
+#define stmmac_do_void_callback(__priv, __module, __cname, __arg0, __args...) \
+({ \
+ int __result = -EINVAL; \
+ if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) { \
+ (__priv)->hw->__module->__cname((__arg0), ##__args); \
+ __result = 0; \
+ } \
+ __result; \
+})
+#define stmmac_do_callback(__priv, __module, __cname, __arg0, __args...) \
+({ \
+ int __result = -EINVAL; \
+ if ((__priv)->hw->__module && (__priv)->hw->__module->__cname) \
+ __result = (__priv)->hw->__module->__cname((__arg0), ##__args); \
+ __result; \
+})
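+
+/* Illustrative expansion (for clarity, not part of the original code):
+ * a wrapper such as stmmac_get_tx_owner(priv, p) becomes roughly
+ *
+ *	int __result = -EINVAL;
+ *	if (priv->hw->desc && priv->hw->desc->get_tx_owner)
+ *		__result = priv->hw->desc->get_tx_owner(p);
+ *
+ * so a callback left NULL degrades to -EINVAL instead of a NULL
+ * dereference.
+ */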
+
+struct stmmac_extra_stats;
+struct stmmac_safety_stats;
+struct dma_desc;
+struct dma_extended_desc;
+struct dma_edesc;
+
+/* Descriptors helpers */
+struct stmmac_desc_ops {
+ /* DMA RX descriptor ring initialization */
+ void (*init_rx_desc)(struct dma_desc *p, int disable_rx_ic, int mode,
+ int end, int bfsize);
+ /* DMA TX descriptor ring initialization */
+ void (*init_tx_desc)(struct dma_desc *p, int mode, int end);
+ /* Invoked by the xmit function to prepare the tx descriptor */
+ void (*prepare_tx_desc)(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own, bool ls,
+ unsigned int tot_pkt_len);
+ void (*prepare_tso_tx_desc)(struct dma_desc *p, int is_fs, int len1,
+ int len2, bool tx_own, bool ls, unsigned int tcphdrlen,
+ unsigned int tcppayloadlen);
+ /* Set/get the owner of the descriptor */
+ void (*set_tx_owner)(struct dma_desc *p);
+ int (*get_tx_owner)(struct dma_desc *p);
+ /* Clean the tx descriptor as soon as the tx irq is received */
+ void (*release_tx_desc)(struct dma_desc *p, int mode);
+ /* Clear interrupt on tx frame completion. When this bit is
+ * set, an interrupt is raised as soon as the frame is transmitted. */
+ void (*set_tx_ic)(struct dma_desc *p);
+ /* Last tx segment reports the transmit status */
+ int (*get_tx_ls)(struct dma_desc *p);
+ /* Return the transmit status looking at the TDES1 */
+ int (*tx_status)(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr);
+ /* Get the buffer size from the descriptor */
+ int (*get_tx_len)(struct dma_desc *p);
+ /* Handle extra events on specific interrupts hw dependent */
+ void (*set_rx_owner)(struct dma_desc *p, int disable_rx_ic);
+ /* Get the receive frame size */
+ int (*get_rx_frame_len)(struct dma_desc *p, int rx_coe_type);
+ /* Return the reception status looking at the RDES1 */
+ int (*rx_status)(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p);
+ void (*rx_extended_status)(void *data, struct stmmac_extra_stats *x,
+ struct dma_extended_desc *p);
+ /* Set tx timestamp enable bit */
+ void (*enable_tx_timestamp) (struct dma_desc *p);
+ /* get tx timestamp status */
+ int (*get_tx_timestamp_status) (struct dma_desc *p);
+ /* get timestamp value */
+ void (*get_timestamp)(void *desc, u32 ats, u64 *ts);
+ /* get rx timestamp status */
+ int (*get_rx_timestamp_status)(void *desc, void *next_desc, u32 ats);
+ /* Display ring */
+ void (*display_ring)(void *head, unsigned int size, bool rx,
+ dma_addr_t dma_rx_phy, unsigned int desc_size);
+ /* set MSS via context descriptor */
+ void (*set_mss)(struct dma_desc *p, unsigned int mss);
+ /* set descriptor skbuff address */
+ void (*set_addr)(struct dma_desc *p, dma_addr_t addr);
+ /* clear descriptor */
+ void (*clear)(struct dma_desc *p);
+ /* RSS */
+ int (*get_rx_hash)(struct dma_desc *p, u32 *hash,
+ enum pkt_hash_types *type);
+ void (*get_rx_header_len)(struct dma_desc *p, unsigned int *len);
+ void (*set_sec_addr)(struct dma_desc *p, dma_addr_t addr, bool buf2_valid);
+ void (*set_sarc)(struct dma_desc *p, u32 sarc_type);
+ void (*set_vlan_tag)(struct dma_desc *p, u16 tag, u16 inner_tag,
+ u32 inner_type);
+ void (*set_vlan)(struct dma_desc *p, u32 type);
+ void (*set_tbs)(struct dma_edesc *p, u32 sec, u32 nsec);
+};
+
+#define stmmac_init_rx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, init_rx_desc, __args)
+#define stmmac_init_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, init_tx_desc, __args)
+#define stmmac_prepare_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, prepare_tx_desc, __args)
+#define stmmac_prepare_tso_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, prepare_tso_tx_desc, __args)
+#define stmmac_set_tx_owner(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tx_owner, __args)
+#define stmmac_get_tx_owner(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_owner, __args)
+#define stmmac_release_tx_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, release_tx_desc, __args)
+#define stmmac_set_tx_ic(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tx_ic, __args)
+#define stmmac_get_tx_ls(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_ls, __args)
+#define stmmac_tx_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, tx_status, __args)
+#define stmmac_get_tx_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_len, __args)
+#define stmmac_set_rx_owner(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_rx_owner, __args)
+#define stmmac_get_rx_frame_len(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_frame_len, __args)
+#define stmmac_rx_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, rx_status, __args)
+#define stmmac_rx_extended_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, rx_extended_status, __args)
+#define stmmac_enable_tx_timestamp(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, enable_tx_timestamp, __args)
+#define stmmac_get_tx_timestamp_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_tx_timestamp_status, __args)
+#define stmmac_get_timestamp(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, get_timestamp, __args)
+#define stmmac_get_rx_timestamp_status(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_timestamp_status, __args)
+#define stmmac_display_ring(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, display_ring, __args)
+#define stmmac_set_mss(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_mss, __args)
+#define stmmac_set_desc_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_addr, __args)
+#define stmmac_clear_desc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, clear, __args)
+#define stmmac_get_rx_hash(__priv, __args...) \
+ stmmac_do_callback(__priv, desc, get_rx_hash, __args)
+#define stmmac_get_rx_header_len(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, get_rx_header_len, __args)
+#define stmmac_set_desc_sec_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sec_addr, __args)
+#define stmmac_set_desc_sarc(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_sarc, __args)
+#define stmmac_set_desc_vlan_tag(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan_tag, __args)
+#define stmmac_set_desc_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_vlan, __args)
+#define stmmac_set_desc_tbs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, desc, set_tbs, __args)
+
+struct stmmac_dma_cfg;
+struct dma_features;
+
+/* Specific DMA helpers */
+struct stmmac_dma_ops {
+ /* DMA core initialization */
+ int (*reset)(void __iomem *ioaddr);
+ void (*init)(void __iomem *ioaddr, struct stmmac_dma_cfg *dma_cfg,
+ int atds);
+ void (*init_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg, u32 chan);
+ void (*init_rx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t phy, u32 chan);
+ void (*init_tx_chan)(void __iomem *ioaddr,
+ struct stmmac_dma_cfg *dma_cfg,
+ dma_addr_t phy, u32 chan);
+ /* Configure the AXI Bus Mode Register */
+ void (*axi)(void __iomem *ioaddr, struct stmmac_axi *axi);
+ /* Dump DMA registers */
+ void (*dump_regs)(void __iomem *ioaddr, u32 *reg_space);
+ void (*dma_rx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz, u8 qmode);
+ void (*dma_tx_mode)(void __iomem *ioaddr, int mode, u32 channel,
+ int fifosz, u8 qmode);
+ /* To track extra statistic (if supported) */
+ void (*dma_diagnostic_fr) (void *data, struct stmmac_extra_stats *x,
+ void __iomem *ioaddr);
+ void (*enable_dma_transmission) (void __iomem *ioaddr);
+ void (*enable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
+ void (*disable_dma_irq)(void __iomem *ioaddr, u32 chan,
+ bool rx, bool tx);
+ void (*start_tx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_tx)(void __iomem *ioaddr, u32 chan);
+ void (*start_rx)(void __iomem *ioaddr, u32 chan);
+ void (*stop_rx)(void __iomem *ioaddr, u32 chan);
+ int (*dma_interrupt) (void __iomem *ioaddr,
+ struct stmmac_extra_stats *x, u32 chan, u32 dir);
+ /* If supported then get the optional core features */
+ int (*get_hw_feature)(void __iomem *ioaddr,
+ struct dma_features *dma_cap);
+ /* Program the HW RX Watchdog */
+ void (*rx_watchdog)(void __iomem *ioaddr, u32 riwt, u32 queue);
+ void (*set_tx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+ void (*set_rx_ring_len)(void __iomem *ioaddr, u32 len, u32 chan);
+ void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
+ void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*qmode)(void __iomem *ioaddr, u32 channel, u8 qmode);
+ void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
+ void (*enable_sph)(void __iomem *ioaddr, bool en, u32 chan);
+ int (*enable_tbs)(void __iomem *ioaddr, bool en, u32 chan);
+};
+
+#define stmmac_reset(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, reset, __args)
+#define stmmac_dma_init(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init, __args)
+#define stmmac_init_chan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init_chan, __args)
+#define stmmac_init_rx_chan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init_rx_chan, __args)
+#define stmmac_init_tx_chan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, init_tx_chan, __args)
+#define stmmac_axi(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, axi, __args)
+#define stmmac_dump_dma_regs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dump_regs, __args)
+#define stmmac_dma_rx_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dma_rx_mode, __args)
+#define stmmac_dma_tx_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dma_tx_mode, __args)
+#define stmmac_dma_diagnostic_fr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, dma_diagnostic_fr, __args)
+#define stmmac_enable_dma_transmission(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_transmission, __args)
+#define stmmac_enable_dma_irq(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_dma_irq, __args)
+#define stmmac_disable_dma_irq(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, disable_dma_irq, __args)
+#define stmmac_start_tx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, start_tx, __args)
+#define stmmac_stop_tx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, stop_tx, __args)
+#define stmmac_start_rx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, start_rx, __args)
+#define stmmac_stop_rx(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, stop_rx, __args)
+#define stmmac_dma_interrupt_status(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, dma_interrupt, __args)
+#define stmmac_get_hw_feature(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, get_hw_feature, __args)
+#define stmmac_rx_watchdog(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, rx_watchdog, __args)
+#define stmmac_set_tx_ring_len(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_tx_ring_len, __args)
+#define stmmac_set_rx_ring_len(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_rx_ring_len, __args)
+#define stmmac_set_rx_tail_ptr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_rx_tail_ptr, __args)
+#define stmmac_set_tx_tail_ptr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
+#define stmmac_enable_tso(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_dma_qmode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, qmode, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
+#define stmmac_enable_sph(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, enable_sph, __args)
+#define stmmac_enable_tbs(__priv, __args...) \
+ stmmac_do_callback(__priv, dma, enable_tbs, __args)
+
+struct mac_device_info;
+struct net_device;
+struct rgmii_adv;
+struct stmmac_tc_entry;
+struct stmmac_pps_cfg;
+struct stmmac_rss;
+struct stmmac_est;
+
+/* Helpers to program the MAC core */
+struct stmmac_ops {
+ /* MAC core initialization */
+ void (*core_init)(struct mac_device_info *hw, struct net_device *dev);
+ /* Enable the MAC RX/TX */
+ void (*set_mac)(void __iomem *ioaddr, bool enable);
+ /* Enable and verify that the IPC module is supported */
+ int (*rx_ipc)(struct mac_device_info *hw);
+ /* Enable RX Queues */
+ void (*rx_queue_enable)(struct mac_device_info *hw, u8 mode, u32 queue);
+ /* RX Queues Priority */
+ void (*rx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* TX Queues Priority */
+ void (*tx_queue_prio)(struct mac_device_info *hw, u32 prio, u32 queue);
+ /* RX Queues Routing */
+ void (*rx_queue_routing)(struct mac_device_info *hw, u8 packet,
+ u32 queue);
+ /* Program RX Algorithms */
+ void (*prog_mtl_rx_algorithms)(struct mac_device_info *hw, u32 rx_alg);
+ /* Program TX Algorithms */
+ void (*prog_mtl_tx_algorithms)(struct mac_device_info *hw, u32 tx_alg);
+ /* Set MTL TX queues weight */
+ void (*set_mtl_tx_queue_weight)(struct mac_device_info *hw,
+ u32 weight, u32 queue);
+ /* RX MTL queue to RX dma mapping */
+ void (*map_mtl_to_dma)(struct mac_device_info *hw, u32 queue, u32 chan);
+ /* Configure AV Algorithm */
+ void (*config_cbs)(struct mac_device_info *hw, u32 send_slope,
+ u32 idle_slope, u32 high_credit, u32 low_credit,
+ u32 queue);
+ /* Dump MAC registers */
+ void (*dump_regs)(struct mac_device_info *hw, u32 *reg_space);
+ /* Handle extra events on specific interrupts hw dependent */
+ int (*host_irq_status)(struct mac_device_info *hw,
+ struct stmmac_extra_stats *x);
+ /* Handle MTL interrupts */
+ int (*host_mtl_irq_status)(struct mac_device_info *hw, u32 chan);
+ /* Multicast filter setting */
+ void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
+ /* Flow control setting */
+ void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
+ unsigned int fc, unsigned int pause_time, u32 tx_cnt);
+ /* Set power management mode (e.g. magic frame) */
+ void (*pmt)(struct mac_device_info *hw, unsigned long mode);
+ /* Set/Get Unicast MAC addresses */
+ void (*set_umac_addr)(struct mac_device_info *hw,
+ const unsigned char *addr,
+ unsigned int reg_n);
+ void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+ unsigned int reg_n);
+ void (*set_eee_mode)(struct mac_device_info *hw,
+ bool en_tx_lpi_clockgating);
+ void (*reset_eee_mode)(struct mac_device_info *hw);
+ void (*set_eee_lpi_entry_timer)(struct mac_device_info *hw, int et);
+ void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
+ void (*set_eee_pls)(struct mac_device_info *hw, int link);
+ void (*debug)(void __iomem *ioaddr, struct stmmac_extra_stats *x,
+ u32 rx_queues, u32 tx_queues);
+ /* PCS calls */
+ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool ane, bool srgmi_ral,
+ bool loopback);
+ void (*pcs_rane)(void __iomem *ioaddr, bool restart);
+ void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv);
+ /* Safety Features */
+ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_feature_cfg *safety_cfg);
+ int (*safety_feat_irq_status)(struct net_device *ndev,
+ void __iomem *ioaddr, unsigned int asp,
+ struct stmmac_safety_stats *stats);
+ int (*safety_feat_dump)(struct stmmac_safety_stats *stats,
+ int index, unsigned long *count, const char **desc);
+ /* Flexible RX Parser */
+ int (*rxp_config)(void __iomem *ioaddr, struct stmmac_tc_entry *entries,
+ unsigned int count);
+ /* Flexible PPS */
+ int (*flex_pps_config)(void __iomem *ioaddr, int index,
+ struct stmmac_pps_cfg *cfg, bool enable,
+ u32 sub_second_inc, u32 systime_flags);
+ /* Loopback for selftests */
+ void (*set_mac_loopback)(void __iomem *ioaddr, bool enable);
+ /* RSS */
+ int (*rss_configure)(struct mac_device_info *hw,
+ struct stmmac_rss *cfg, u32 num_rxq);
+ /* VLAN */
+ void (*update_vlan_hash)(struct mac_device_info *hw, u32 hash,
+ __le16 perfect_match, bool is_double);
+ void (*enable_vlan)(struct mac_device_info *hw, u32 type);
+ int (*add_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ int (*del_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw,
+ __be16 proto, u16 vid);
+ void (*restore_hw_vlan_rx_fltr)(struct net_device *dev,
+ struct mac_device_info *hw);
+ /* TX Timestamp */
+ int (*get_mac_tx_timestamp)(struct mac_device_info *hw, u64 *ts);
+ /* Source Address Insertion / Replacement */
+ void (*sarc_configure)(void __iomem *ioaddr, int val);
+ /* Filtering */
+ int (*config_l3_filter)(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool ipv6, bool sa, bool inv,
+ u32 match);
+ int (*config_l4_filter)(struct mac_device_info *hw, u32 filter_no,
+ bool en, bool udp, bool sa, bool inv,
+ u32 match);
+ void (*set_arp_offload)(struct mac_device_info *hw, bool en, u32 addr);
+ int (*est_configure)(void __iomem *ioaddr, struct stmmac_est *cfg,
+ unsigned int ptp_rate);
+ void (*est_irq_status)(void __iomem *ioaddr, struct net_device *dev,
+ struct stmmac_extra_stats *x, u32 txqcnt);
+ void (*fpe_configure)(void __iomem *ioaddr, struct stmmac_fpe_cfg *cfg,
+ u32 num_txq, u32 num_rxq,
+ bool enable);
+ void (*fpe_send_mpacket)(void __iomem *ioaddr,
+ struct stmmac_fpe_cfg *cfg,
+ enum stmmac_mpacket_type type);
+ int (*fpe_irq_status)(void __iomem *ioaddr, struct net_device *dev);
+};
+
+#define stmmac_core_init(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, core_init, __args)
+#define stmmac_mac_set(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mac, __args)
+#define stmmac_rx_ipc(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rx_ipc, __args)
+#define stmmac_rx_queue_enable(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_queue_enable, __args)
+#define stmmac_rx_queue_prio(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_queue_prio, __args)
+#define stmmac_tx_queue_prio(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, tx_queue_prio, __args)
+#define stmmac_rx_queue_routing(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, rx_queue_routing, __args)
+#define stmmac_prog_mtl_rx_algorithms(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, prog_mtl_rx_algorithms, __args)
+#define stmmac_prog_mtl_tx_algorithms(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, prog_mtl_tx_algorithms, __args)
+#define stmmac_set_mtl_tx_queue_weight(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mtl_tx_queue_weight, __args)
+#define stmmac_map_mtl_to_dma(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, map_mtl_to_dma, __args)
+#define stmmac_config_cbs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, config_cbs, __args)
+#define stmmac_dump_mac_regs(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, dump_regs, __args)
+#define stmmac_host_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, host_irq_status, __args)
+#define stmmac_host_mtl_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, host_mtl_irq_status, __args)
+#define stmmac_set_filter(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_filter, __args)
+#define stmmac_flow_ctrl(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, flow_ctrl, __args)
+#define stmmac_pmt(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pmt, __args)
+#define stmmac_set_umac_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_umac_addr, __args)
+#define stmmac_get_umac_addr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, get_umac_addr, __args)
+#define stmmac_set_eee_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_mode, __args)
+#define stmmac_reset_eee_mode(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, reset_eee_mode, __args)
+#define stmmac_set_eee_lpi_timer(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_lpi_entry_timer, __args)
+#define stmmac_set_eee_timer(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_timer, __args)
+#define stmmac_set_eee_pls(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_eee_pls, __args)
+#define stmmac_mac_debug(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, debug, __args)
+#define stmmac_pcs_ctrl_ane(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args)
+#define stmmac_pcs_rane(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pcs_rane, __args)
+#define stmmac_pcs_get_adv_lp(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args)
+#define stmmac_safety_feat_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, safety_feat_config, __args)
+#define stmmac_safety_feat_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, safety_feat_irq_status, __args)
+#define stmmac_safety_feat_dump(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, safety_feat_dump, __args)
+#define stmmac_rxp_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rxp_config, __args)
+#define stmmac_flex_pps_config(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, flex_pps_config, __args)
+#define stmmac_set_mac_loopback(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_mac_loopback, __args)
+#define stmmac_rss_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, rss_configure, __args)
+#define stmmac_update_vlan_hash(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, update_vlan_hash, __args)
+#define stmmac_enable_vlan(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, enable_vlan, __args)
+#define stmmac_add_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, add_hw_vlan_rx_fltr, __args)
+#define stmmac_del_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, del_hw_vlan_rx_fltr, __args)
+#define stmmac_restore_hw_vlan_rx_fltr(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, restore_hw_vlan_rx_fltr, __args)
+#define stmmac_get_mac_tx_timestamp(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, get_mac_tx_timestamp, __args)
+#define stmmac_sarc_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, sarc_configure, __args)
+#define stmmac_config_l3_filter(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, config_l3_filter, __args)
+#define stmmac_config_l4_filter(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, config_l4_filter, __args)
+#define stmmac_set_arp_offload(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, set_arp_offload, __args)
+#define stmmac_est_configure(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, est_configure, __args)
+#define stmmac_est_irq_status(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, est_irq_status, __args)
+#define stmmac_fpe_configure(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_configure, __args)
+#define stmmac_fpe_send_mpacket(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mac, fpe_send_mpacket, __args)
+#define stmmac_fpe_irq_status(__priv, __args...) \
+ stmmac_do_callback(__priv, mac, fpe_irq_status, __args)
+
+struct stmmac_priv;
+
+/* PTP and HW Timer helpers */
+struct stmmac_hwtimestamp {
+ void (*config_hw_tstamping) (void __iomem *ioaddr, u32 data);
+ void (*config_sub_second_increment)(void __iomem *ioaddr, u32 ptp_clock,
+ int gmac4, u32 *ssinc);
+ int (*init_systime) (void __iomem *ioaddr, u32 sec, u32 nsec);
+ int (*config_addend) (void __iomem *ioaddr, u32 addend);
+ int (*adjust_systime) (void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub, int gmac4);
+ void (*get_systime) (void __iomem *ioaddr, u64 *systime);
+ void (*get_ptptime)(void __iomem *ioaddr, u64 *ptp_time);
+ void (*timestamp_interrupt)(struct stmmac_priv *priv);
+};
+
+#define stmmac_config_hw_tstamping(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, config_hw_tstamping, __args)
+#define stmmac_config_sub_second_increment(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, config_sub_second_increment, __args)
+#define stmmac_init_systime(__priv, __args...) \
+ stmmac_do_callback(__priv, ptp, init_systime, __args)
+#define stmmac_config_addend(__priv, __args...) \
+ stmmac_do_callback(__priv, ptp, config_addend, __args)
+#define stmmac_adjust_systime(__priv, __args...) \
+ stmmac_do_callback(__priv, ptp, adjust_systime, __args)
+#define stmmac_get_systime(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, get_systime, __args)
+#define stmmac_get_ptptime(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, get_ptptime, __args)
+#define stmmac_timestamp_interrupt(__priv, __args...) \
+ stmmac_do_void_callback(__priv, ptp, timestamp_interrupt, __args)
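+
+/* Illustrative relation (assumption, following the usual DWMAC fine
+ * correction scheme): the hardware adds 'addend' to a 32-bit accumulator
+ * every PTP clock cycle and advances the sub-second counter by 'ssinc'
+ * nanoseconds on overflow, so approximately
+ *
+ *	addend = ((2^32) * (10^9 / ssinc)) / clk_ptp_rate
+ *
+ * which is what config_sub_second_increment() and config_addend() are
+ * expected to program together.
+ */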
+
+/* Helpers to manage the descriptors for chain and ring modes */
+struct stmmac_mode_ops {
+ void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
+ unsigned int extend_desc);
+ unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
+ int (*jumbo_frm)(void *priv, struct sk_buff *skb, int csum);
+ int (*set_16kib_bfsize)(int mtu);
+ void (*init_desc3)(struct dma_desc *p);
+ void (*refill_desc3) (void *priv, struct dma_desc *p);
+ void (*clean_desc3) (void *priv, struct dma_desc *p);
+};
+
+#define stmmac_mode_init(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, init, __args)
+#define stmmac_is_jumbo_frm(__priv, __args...) \
+ stmmac_do_callback(__priv, mode, is_jumbo_frm, __args)
+#define stmmac_jumbo_frm(__priv, __args...) \
+ stmmac_do_callback(__priv, mode, jumbo_frm, __args)
+#define stmmac_set_16kib_bfsize(__priv, __args...) \
+ stmmac_do_callback(__priv, mode, set_16kib_bfsize, __args)
+#define stmmac_init_desc3(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, init_desc3, __args)
+#define stmmac_refill_desc3(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, refill_desc3, __args)
+#define stmmac_clean_desc3(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mode, clean_desc3, __args)
+
+struct tc_cls_u32_offload;
+struct tc_cbs_qopt_offload;
+struct flow_cls_offload;
+struct tc_taprio_qopt_offload;
+struct tc_etf_qopt_offload;
+
+struct stmmac_tc_ops {
+ int (*init)(struct stmmac_priv *priv);
+ int (*setup_cls_u32)(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls);
+ int (*setup_cbs)(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt);
+ int (*setup_cls)(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls);
+ int (*setup_taprio)(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt);
+ int (*setup_etf)(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt);
+};
+
+#define stmmac_tc_init(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, init, __args)
+#define stmmac_tc_setup_cls_u32(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cls_u32, __args)
+#define stmmac_tc_setup_cbs(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cbs, __args)
+#define stmmac_tc_setup_cls(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_cls, __args)
+#define stmmac_tc_setup_taprio(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_taprio, __args)
+#define stmmac_tc_setup_etf(__priv, __args...) \
+ stmmac_do_callback(__priv, tc, setup_etf, __args)
+
+struct stmmac_counters;
+
+struct stmmac_mmc_ops {
+ void (*ctrl)(void __iomem *ioaddr, unsigned int mode);
+ void (*intr_all_mask)(void __iomem *ioaddr);
+ void (*read)(void __iomem *ioaddr, struct stmmac_counters *mmc);
+};
+
+#define stmmac_mmc_ctrl(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, ctrl, __args)
+#define stmmac_mmc_intr_all_mask(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, intr_all_mask, __args)
+#define stmmac_mmc_read(__priv, __args...) \
+ stmmac_do_void_callback(__priv, mmc, read, __args)
+
+struct stmmac_regs_off {
+ u32 ptp_off;
+ u32 mmc_off;
+};
+
+extern const struct stmmac_ops dwmac100_ops;
+extern const struct stmmac_dma_ops dwmac100_dma_ops;
+extern const struct stmmac_ops dwmac1000_ops;
+extern const struct stmmac_dma_ops dwmac1000_dma_ops;
+extern const struct stmmac_ops dwmac4_ops;
+extern const struct stmmac_dma_ops dwmac4_dma_ops;
+extern const struct stmmac_ops dwmac410_ops;
+extern const struct stmmac_dma_ops dwmac410_dma_ops;
+extern const struct stmmac_ops dwmac510_ops;
+extern const struct stmmac_tc_ops dwmac510_tc_ops;
+extern const struct stmmac_ops dwxgmac210_ops;
+extern const struct stmmac_ops dwxlgmac2_ops;
+extern const struct stmmac_dma_ops dwxgmac210_dma_ops;
+extern const struct stmmac_desc_ops dwxgmac210_desc_ops;
+extern const struct stmmac_mmc_ops dwmac_mmc_ops;
+extern const struct stmmac_mmc_ops dwxgmac_mmc_ops;
+
+#define GMAC_VERSION 0x00000020 /* GMAC CORE Version */
+#define GMAC4_VERSION 0x00000110 /* GMAC4+ CORE Version */
+
+int stmmac_hwif_init(struct stmmac_priv *priv);
+
+#endif /* __STMMAC_HWIF_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc.h b/drivers/net/ethernet/stmicro/stmmac/mmc.h
new file mode 100644
index 000000000..a0c059258
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ MMC Header file
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __MMC_H__
+#define __MMC_H__
+
+/* MMC control register */
+/* When set, all counters are reset */
+#define MMC_CNTRL_COUNTER_RESET 0x1
+/* When set, counters do not roll over to zero after reaching the max value */
+#define MMC_CNTRL_COUNTER_STOP_ROLLOVER 0x2
+#define MMC_CNTRL_RESET_ON_READ 0x4 /* Reset after reading */
+#define MMC_CNTRL_COUNTER_FREEZER 0x8 /* Freeze counters at their
+ * current value. */
+#define MMC_CNTRL_PRESET 0x10
+#define MMC_CNTRL_FULL_HALF_PRESET 0x20
+
+#define MMC_GMAC4_OFFSET 0x700
+#define MMC_GMAC3_X_OFFSET 0x100
+#define MMC_XGMAC_OFFSET 0x800
+
+struct stmmac_counters {
+ unsigned int mmc_tx_octetcount_gb;
+ unsigned int mmc_tx_framecount_gb;
+ unsigned int mmc_tx_broadcastframe_g;
+ unsigned int mmc_tx_multicastframe_g;
+ unsigned int mmc_tx_64_octets_gb;
+ unsigned int mmc_tx_65_to_127_octets_gb;
+ unsigned int mmc_tx_128_to_255_octets_gb;
+ unsigned int mmc_tx_256_to_511_octets_gb;
+ unsigned int mmc_tx_512_to_1023_octets_gb;
+ unsigned int mmc_tx_1024_to_max_octets_gb;
+ unsigned int mmc_tx_unicast_gb;
+ unsigned int mmc_tx_multicast_gb;
+ unsigned int mmc_tx_broadcast_gb;
+ unsigned int mmc_tx_underflow_error;
+ unsigned int mmc_tx_singlecol_g;
+ unsigned int mmc_tx_multicol_g;
+ unsigned int mmc_tx_deferred;
+ unsigned int mmc_tx_latecol;
+ unsigned int mmc_tx_exesscol;
+ unsigned int mmc_tx_carrier_error;
+ unsigned int mmc_tx_octetcount_g;
+ unsigned int mmc_tx_framecount_g;
+ unsigned int mmc_tx_excessdef;
+ unsigned int mmc_tx_pause_frame;
+ unsigned int mmc_tx_vlan_frame_g;
+
+ /* MMC RX counter registers */
+ unsigned int mmc_rx_framecount_gb;
+ unsigned int mmc_rx_octetcount_gb;
+ unsigned int mmc_rx_octetcount_g;
+ unsigned int mmc_rx_broadcastframe_g;
+ unsigned int mmc_rx_multicastframe_g;
+ unsigned int mmc_rx_crc_error;
+ unsigned int mmc_rx_align_error;
+ unsigned int mmc_rx_run_error;
+ unsigned int mmc_rx_jabber_error;
+ unsigned int mmc_rx_undersize_g;
+ unsigned int mmc_rx_oversize_g;
+ unsigned int mmc_rx_64_octets_gb;
+ unsigned int mmc_rx_65_to_127_octets_gb;
+ unsigned int mmc_rx_128_to_255_octets_gb;
+ unsigned int mmc_rx_256_to_511_octets_gb;
+ unsigned int mmc_rx_512_to_1023_octets_gb;
+ unsigned int mmc_rx_1024_to_max_octets_gb;
+ unsigned int mmc_rx_unicast_g;
+ unsigned int mmc_rx_length_error;
+ unsigned int mmc_rx_autofrangetype;
+ unsigned int mmc_rx_pause_frames;
+ unsigned int mmc_rx_fifo_overflow;
+ unsigned int mmc_rx_vlan_frames_gb;
+ unsigned int mmc_rx_watchdog_error;
+ /* IPC */
+ unsigned int mmc_rx_ipc_intr_mask;
+ unsigned int mmc_rx_ipc_intr;
+ /* IPv4 */
+ unsigned int mmc_rx_ipv4_gd;
+ unsigned int mmc_rx_ipv4_hderr;
+ unsigned int mmc_rx_ipv4_nopay;
+ unsigned int mmc_rx_ipv4_frag;
+ unsigned int mmc_rx_ipv4_udsbl;
+
+ unsigned int mmc_rx_ipv4_gd_octets;
+ unsigned int mmc_rx_ipv4_hderr_octets;
+ unsigned int mmc_rx_ipv4_nopay_octets;
+ unsigned int mmc_rx_ipv4_frag_octets;
+ unsigned int mmc_rx_ipv4_udsbl_octets;
+
+ /* IPV6 */
+ unsigned int mmc_rx_ipv6_gd_octets;
+ unsigned int mmc_rx_ipv6_hderr_octets;
+ unsigned int mmc_rx_ipv6_nopay_octets;
+
+ unsigned int mmc_rx_ipv6_gd;
+ unsigned int mmc_rx_ipv6_hderr;
+ unsigned int mmc_rx_ipv6_nopay;
+
+ /* Protocols */
+ unsigned int mmc_rx_udp_gd;
+ unsigned int mmc_rx_udp_err;
+ unsigned int mmc_rx_tcp_gd;
+ unsigned int mmc_rx_tcp_err;
+ unsigned int mmc_rx_icmp_gd;
+ unsigned int mmc_rx_icmp_err;
+
+ unsigned int mmc_rx_udp_gd_octets;
+ unsigned int mmc_rx_udp_err_octets;
+ unsigned int mmc_rx_tcp_gd_octets;
+ unsigned int mmc_rx_tcp_err_octets;
+ unsigned int mmc_rx_icmp_gd_octets;
+ unsigned int mmc_rx_icmp_err_octets;
+
+ /* FPE */
+ unsigned int mmc_tx_fpe_fragment_cntr;
+ unsigned int mmc_tx_hold_req_cntr;
+ unsigned int mmc_rx_packet_assembly_err_cntr;
+ unsigned int mmc_rx_packet_smd_err_cntr;
+ unsigned int mmc_rx_packet_assembly_ok_cntr;
+ unsigned int mmc_rx_fpe_fragment_cntr;
+};
+
+#endif /* __MMC_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
new file mode 100644
index 000000000..6a7c1d325
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -0,0 +1,479 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ DWMAC Management Counters
+
+ Copyright (C) 2011 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include "hwif.h"
+#include "mmc.h"
+
+/* MAC Management Counters register offset */
+
+#define MMC_CNTRL 0x00 /* MMC Control */
+#define MMC_RX_INTR 0x04 /* MMC RX Interrupt */
+#define MMC_TX_INTR 0x08 /* MMC TX Interrupt */
+#define MMC_RX_INTR_MASK 0x0c /* MMC Interrupt Mask */
+#define MMC_TX_INTR_MASK 0x10 /* MMC Interrupt Mask */
+#define MMC_DEFAULT_MASK 0xffffffff
+
+/* MMC TX counter registers */
+
+/* Note:
+ * _GB register stands for good and bad frames
+ * _G is for good only.
+ */
+#define MMC_TX_OCTETCOUNT_GB 0x14
+#define MMC_TX_FRAMECOUNT_GB 0x18
+#define MMC_TX_BROADCASTFRAME_G 0x1c
+#define MMC_TX_MULTICASTFRAME_G 0x20
+#define MMC_TX_64_OCTETS_GB 0x24
+#define MMC_TX_65_TO_127_OCTETS_GB 0x28
+#define MMC_TX_128_TO_255_OCTETS_GB 0x2c
+#define MMC_TX_256_TO_511_OCTETS_GB 0x30
+#define MMC_TX_512_TO_1023_OCTETS_GB 0x34
+#define MMC_TX_1024_TO_MAX_OCTETS_GB 0x38
+#define MMC_TX_UNICAST_GB 0x3c
+#define MMC_TX_MULTICAST_GB 0x40
+#define MMC_TX_BROADCAST_GB 0x44
+#define MMC_TX_UNDERFLOW_ERROR 0x48
+#define MMC_TX_SINGLECOL_G 0x4c
+#define MMC_TX_MULTICOL_G 0x50
+#define MMC_TX_DEFERRED 0x54
+#define MMC_TX_LATECOL 0x58
+#define MMC_TX_EXESSCOL 0x5c
+#define MMC_TX_CARRIER_ERROR 0x60
+#define MMC_TX_OCTETCOUNT_G 0x64
+#define MMC_TX_FRAMECOUNT_G 0x68
+#define MMC_TX_EXCESSDEF 0x6c
+#define MMC_TX_PAUSE_FRAME 0x70
+#define MMC_TX_VLAN_FRAME_G 0x74
+
+/* MMC RX counter registers */
+#define MMC_RX_FRAMECOUNT_GB 0x80
+#define MMC_RX_OCTETCOUNT_GB 0x84
+#define MMC_RX_OCTETCOUNT_G 0x88
+#define MMC_RX_BROADCASTFRAME_G 0x8c
+#define MMC_RX_MULTICASTFRAME_G 0x90
+#define MMC_RX_CRC_ERROR 0x94
+#define MMC_RX_ALIGN_ERROR 0x98
+#define MMC_RX_RUN_ERROR 0x9C
+#define MMC_RX_JABBER_ERROR 0xA0
+#define MMC_RX_UNDERSIZE_G 0xA4
+#define MMC_RX_OVERSIZE_G 0xA8
+#define MMC_RX_64_OCTETS_GB 0xAC
+#define MMC_RX_65_TO_127_OCTETS_GB 0xb0
+#define MMC_RX_128_TO_255_OCTETS_GB 0xb4
+#define MMC_RX_256_TO_511_OCTETS_GB 0xb8
+#define MMC_RX_512_TO_1023_OCTETS_GB 0xbc
+#define MMC_RX_1024_TO_MAX_OCTETS_GB 0xc0
+#define MMC_RX_UNICAST_G 0xc4
+#define MMC_RX_LENGTH_ERROR 0xc8
+#define MMC_RX_AUTOFRANGETYPE 0xcc
+#define MMC_RX_PAUSE_FRAMES 0xd0
+#define MMC_RX_FIFO_OVERFLOW 0xd4
+#define MMC_RX_VLAN_FRAMES_GB 0xd8
+#define MMC_RX_WATCHDOG_ERROR 0xdc
+/* IPC*/
+#define MMC_RX_IPC_INTR_MASK 0x100
+#define MMC_RX_IPC_INTR 0x108
+/* IPv4*/
+#define MMC_RX_IPV4_GD 0x110
+#define MMC_RX_IPV4_HDERR 0x114
+#define MMC_RX_IPV4_NOPAY 0x118
+#define MMC_RX_IPV4_FRAG 0x11C
+#define MMC_RX_IPV4_UDSBL 0x120
+
+#define MMC_RX_IPV4_GD_OCTETS 0x150
+#define MMC_RX_IPV4_HDERR_OCTETS 0x154
+#define MMC_RX_IPV4_NOPAY_OCTETS 0x158
+#define MMC_RX_IPV4_FRAG_OCTETS 0x15c
+#define MMC_RX_IPV4_UDSBL_OCTETS 0x160
+
+/* IPV6*/
+#define MMC_RX_IPV6_GD_OCTETS 0x164
+#define MMC_RX_IPV6_HDERR_OCTETS 0x168
+#define MMC_RX_IPV6_NOPAY_OCTETS 0x16c
+
+#define MMC_RX_IPV6_GD 0x124
+#define MMC_RX_IPV6_HDERR 0x128
+#define MMC_RX_IPV6_NOPAY 0x12c
+
+/* Protocols*/
+#define MMC_RX_UDP_GD 0x130
+#define MMC_RX_UDP_ERR 0x134
+#define MMC_RX_TCP_GD 0x138
+#define MMC_RX_TCP_ERR 0x13c
+#define MMC_RX_ICMP_GD 0x140
+#define MMC_RX_ICMP_ERR 0x144
+
+#define MMC_RX_UDP_GD_OCTETS 0x170
+#define MMC_RX_UDP_ERR_OCTETS 0x174
+#define MMC_RX_TCP_GD_OCTETS 0x178
+#define MMC_RX_TCP_ERR_OCTETS 0x17c
+#define MMC_RX_ICMP_GD_OCTETS 0x180
+#define MMC_RX_ICMP_ERR_OCTETS 0x184
+
+#define MMC_TX_FPE_FRAG 0x1a8
+#define MMC_TX_HOLD_REQ 0x1ac
+#define MMC_RX_PKT_ASSEMBLY_ERR 0x1c8
+#define MMC_RX_PKT_SMD_ERR 0x1cc
+#define MMC_RX_PKT_ASSEMBLY_OK 0x1d0
+#define MMC_RX_FPE_FRAG 0x1d4
+
+/* XGMAC MMC Registers */
+#define MMC_XGMAC_TX_OCTET_GB 0x14
+#define MMC_XGMAC_TX_PKT_GB 0x1c
+#define MMC_XGMAC_TX_BROAD_PKT_G 0x24
+#define MMC_XGMAC_TX_MULTI_PKT_G 0x2c
+#define MMC_XGMAC_TX_64OCT_GB 0x34
+#define MMC_XGMAC_TX_65OCT_GB 0x3c
+#define MMC_XGMAC_TX_128OCT_GB 0x44
+#define MMC_XGMAC_TX_256OCT_GB 0x4c
+#define MMC_XGMAC_TX_512OCT_GB 0x54
+#define MMC_XGMAC_TX_1024OCT_GB 0x5c
+#define MMC_XGMAC_TX_UNI_PKT_GB 0x64
+#define MMC_XGMAC_TX_MULTI_PKT_GB 0x6c
+#define MMC_XGMAC_TX_BROAD_PKT_GB 0x74
+#define MMC_XGMAC_TX_UNDER 0x7c
+#define MMC_XGMAC_TX_OCTET_G 0x84
+#define MMC_XGMAC_TX_PKT_G 0x8c
+#define MMC_XGMAC_TX_PAUSE 0x94
+#define MMC_XGMAC_TX_VLAN_PKT_G 0x9c
+#define MMC_XGMAC_TX_LPI_USEC 0xa4
+#define MMC_XGMAC_TX_LPI_TRAN 0xa8
+
+#define MMC_XGMAC_RX_PKT_GB 0x100
+#define MMC_XGMAC_RX_OCTET_GB 0x108
+#define MMC_XGMAC_RX_OCTET_G 0x110
+#define MMC_XGMAC_RX_BROAD_PKT_G 0x118
+#define MMC_XGMAC_RX_MULTI_PKT_G 0x120
+#define MMC_XGMAC_RX_CRC_ERR 0x128
+#define MMC_XGMAC_RX_RUNT_ERR 0x130
+#define MMC_XGMAC_RX_JABBER_ERR 0x134
+#define MMC_XGMAC_RX_UNDER 0x138
+#define MMC_XGMAC_RX_OVER 0x13c
+#define MMC_XGMAC_RX_64OCT_GB 0x140
+#define MMC_XGMAC_RX_65OCT_GB 0x148
+#define MMC_XGMAC_RX_128OCT_GB 0x150
+#define MMC_XGMAC_RX_256OCT_GB 0x158
+#define MMC_XGMAC_RX_512OCT_GB 0x160
+#define MMC_XGMAC_RX_1024OCT_GB 0x168
+#define MMC_XGMAC_RX_UNI_PKT_G 0x170
+#define MMC_XGMAC_RX_LENGTH_ERR 0x178
+#define MMC_XGMAC_RX_RANGE 0x180
+#define MMC_XGMAC_RX_PAUSE 0x188
+#define MMC_XGMAC_RX_FIFOOVER_PKT 0x190
+#define MMC_XGMAC_RX_VLAN_PKT_GB 0x198
+#define MMC_XGMAC_RX_WATCHDOG_ERR 0x1a0
+#define MMC_XGMAC_RX_LPI_USEC 0x1a4
+#define MMC_XGMAC_RX_LPI_TRAN 0x1a8
+#define MMC_XGMAC_RX_DISCARD_PKT_GB 0x1ac
+#define MMC_XGMAC_RX_DISCARD_OCT_GB 0x1b4
+#define MMC_XGMAC_RX_ALIGN_ERR_PKT 0x1bc
+
+#define MMC_XGMAC_TX_FPE_INTR_MASK 0x204
+#define MMC_XGMAC_TX_FPE_FRAG 0x208
+#define MMC_XGMAC_TX_HOLD_REQ 0x20c
+#define MMC_XGMAC_RX_FPE_INTR_MASK 0x224
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_ERR 0x228
+#define MMC_XGMAC_RX_PKT_SMD_ERR 0x22c
+#define MMC_XGMAC_RX_PKT_ASSEMBLY_OK 0x230
+#define MMC_XGMAC_RX_FPE_FRAG 0x234
+#define MMC_XGMAC_RX_IPC_INTR_MASK 0x25c
+
+static void dwmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+
+ pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
+ MMC_CNTRL, value);
+}
+
+/* To mask all interrupts. */
+static void dwmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_RX_IPC_INTR_MASK);
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read. So all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ mmc->mmc_tx_octetcount_gb += readl(mmcaddr + MMC_TX_OCTETCOUNT_GB);
+ mmc->mmc_tx_framecount_gb += readl(mmcaddr + MMC_TX_FRAMECOUNT_GB);
+ mmc->mmc_tx_broadcastframe_g += readl(mmcaddr +
+ MMC_TX_BROADCASTFRAME_G);
+ mmc->mmc_tx_multicastframe_g += readl(mmcaddr +
+ MMC_TX_MULTICASTFRAME_G);
+ mmc->mmc_tx_64_octets_gb += readl(mmcaddr + MMC_TX_64_OCTETS_GB);
+ mmc->mmc_tx_65_to_127_octets_gb +=
+ readl(mmcaddr + MMC_TX_65_TO_127_OCTETS_GB);
+ mmc->mmc_tx_128_to_255_octets_gb +=
+ readl(mmcaddr + MMC_TX_128_TO_255_OCTETS_GB);
+ mmc->mmc_tx_256_to_511_octets_gb +=
+ readl(mmcaddr + MMC_TX_256_TO_511_OCTETS_GB);
+ mmc->mmc_tx_512_to_1023_octets_gb +=
+ readl(mmcaddr + MMC_TX_512_TO_1023_OCTETS_GB);
+ mmc->mmc_tx_1024_to_max_octets_gb +=
+ readl(mmcaddr + MMC_TX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_tx_unicast_gb += readl(mmcaddr + MMC_TX_UNICAST_GB);
+ mmc->mmc_tx_multicast_gb += readl(mmcaddr + MMC_TX_MULTICAST_GB);
+ mmc->mmc_tx_broadcast_gb += readl(mmcaddr + MMC_TX_BROADCAST_GB);
+ mmc->mmc_tx_underflow_error += readl(mmcaddr + MMC_TX_UNDERFLOW_ERROR);
+ mmc->mmc_tx_singlecol_g += readl(mmcaddr + MMC_TX_SINGLECOL_G);
+ mmc->mmc_tx_multicol_g += readl(mmcaddr + MMC_TX_MULTICOL_G);
+ mmc->mmc_tx_deferred += readl(mmcaddr + MMC_TX_DEFERRED);
+ mmc->mmc_tx_latecol += readl(mmcaddr + MMC_TX_LATECOL);
+ mmc->mmc_tx_exesscol += readl(mmcaddr + MMC_TX_EXESSCOL);
+ mmc->mmc_tx_carrier_error += readl(mmcaddr + MMC_TX_CARRIER_ERROR);
+ mmc->mmc_tx_octetcount_g += readl(mmcaddr + MMC_TX_OCTETCOUNT_G);
+ mmc->mmc_tx_framecount_g += readl(mmcaddr + MMC_TX_FRAMECOUNT_G);
+ mmc->mmc_tx_excessdef += readl(mmcaddr + MMC_TX_EXCESSDEF);
+ mmc->mmc_tx_pause_frame += readl(mmcaddr + MMC_TX_PAUSE_FRAME);
+ mmc->mmc_tx_vlan_frame_g += readl(mmcaddr + MMC_TX_VLAN_FRAME_G);
+
+ /* MMC RX counter registers */
+ mmc->mmc_rx_framecount_gb += readl(mmcaddr + MMC_RX_FRAMECOUNT_GB);
+ mmc->mmc_rx_octetcount_gb += readl(mmcaddr + MMC_RX_OCTETCOUNT_GB);
+ mmc->mmc_rx_octetcount_g += readl(mmcaddr + MMC_RX_OCTETCOUNT_G);
+ mmc->mmc_rx_broadcastframe_g += readl(mmcaddr +
+ MMC_RX_BROADCASTFRAME_G);
+ mmc->mmc_rx_multicastframe_g += readl(mmcaddr +
+ MMC_RX_MULTICASTFRAME_G);
+ mmc->mmc_rx_crc_error += readl(mmcaddr + MMC_RX_CRC_ERROR);
+ mmc->mmc_rx_align_error += readl(mmcaddr + MMC_RX_ALIGN_ERROR);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_RX_RUN_ERROR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_RX_JABBER_ERROR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_RX_UNDERSIZE_G);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_RX_OVERSIZE_G);
+ mmc->mmc_rx_64_octets_gb += readl(mmcaddr + MMC_RX_64_OCTETS_GB);
+ mmc->mmc_rx_65_to_127_octets_gb +=
+ readl(mmcaddr + MMC_RX_65_TO_127_OCTETS_GB);
+ mmc->mmc_rx_128_to_255_octets_gb +=
+ readl(mmcaddr + MMC_RX_128_TO_255_OCTETS_GB);
+ mmc->mmc_rx_256_to_511_octets_gb +=
+ readl(mmcaddr + MMC_RX_256_TO_511_OCTETS_GB);
+ mmc->mmc_rx_512_to_1023_octets_gb +=
+ readl(mmcaddr + MMC_RX_512_TO_1023_OCTETS_GB);
+ mmc->mmc_rx_1024_to_max_octets_gb +=
+ readl(mmcaddr + MMC_RX_1024_TO_MAX_OCTETS_GB);
+ mmc->mmc_rx_unicast_g += readl(mmcaddr + MMC_RX_UNICAST_G);
+ mmc->mmc_rx_length_error += readl(mmcaddr + MMC_RX_LENGTH_ERROR);
+ mmc->mmc_rx_autofrangetype += readl(mmcaddr + MMC_RX_AUTOFRANGETYPE);
+ mmc->mmc_rx_pause_frames += readl(mmcaddr + MMC_RX_PAUSE_FRAMES);
+ mmc->mmc_rx_fifo_overflow += readl(mmcaddr + MMC_RX_FIFO_OVERFLOW);
+ mmc->mmc_rx_vlan_frames_gb += readl(mmcaddr + MMC_RX_VLAN_FRAMES_GB);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_RX_WATCHDOG_ERROR);
+ /* IPC */
+ mmc->mmc_rx_ipc_intr_mask += readl(mmcaddr + MMC_RX_IPC_INTR_MASK);
+ mmc->mmc_rx_ipc_intr += readl(mmcaddr + MMC_RX_IPC_INTR);
+ /* IPv4 */
+ mmc->mmc_rx_ipv4_gd += readl(mmcaddr + MMC_RX_IPV4_GD);
+ mmc->mmc_rx_ipv4_hderr += readl(mmcaddr + MMC_RX_IPV4_HDERR);
+ mmc->mmc_rx_ipv4_nopay += readl(mmcaddr + MMC_RX_IPV4_NOPAY);
+ mmc->mmc_rx_ipv4_frag += readl(mmcaddr + MMC_RX_IPV4_FRAG);
+ mmc->mmc_rx_ipv4_udsbl += readl(mmcaddr + MMC_RX_IPV4_UDSBL);
+
+ mmc->mmc_rx_ipv4_gd_octets += readl(mmcaddr + MMC_RX_IPV4_GD_OCTETS);
+ mmc->mmc_rx_ipv4_hderr_octets +=
+ readl(mmcaddr + MMC_RX_IPV4_HDERR_OCTETS);
+ mmc->mmc_rx_ipv4_nopay_octets +=
+ readl(mmcaddr + MMC_RX_IPV4_NOPAY_OCTETS);
+ mmc->mmc_rx_ipv4_frag_octets += readl(mmcaddr +
+ MMC_RX_IPV4_FRAG_OCTETS);
+ mmc->mmc_rx_ipv4_udsbl_octets +=
+ readl(mmcaddr + MMC_RX_IPV4_UDSBL_OCTETS);
+
+ /* IPV6 */
+ mmc->mmc_rx_ipv6_gd_octets += readl(mmcaddr + MMC_RX_IPV6_GD_OCTETS);
+ mmc->mmc_rx_ipv6_hderr_octets +=
+ readl(mmcaddr + MMC_RX_IPV6_HDERR_OCTETS);
+ mmc->mmc_rx_ipv6_nopay_octets +=
+ readl(mmcaddr + MMC_RX_IPV6_NOPAY_OCTETS);
+
+ mmc->mmc_rx_ipv6_gd += readl(mmcaddr + MMC_RX_IPV6_GD);
+ mmc->mmc_rx_ipv6_hderr += readl(mmcaddr + MMC_RX_IPV6_HDERR);
+ mmc->mmc_rx_ipv6_nopay += readl(mmcaddr + MMC_RX_IPV6_NOPAY);
+
+ /* Protocols */
+ mmc->mmc_rx_udp_gd += readl(mmcaddr + MMC_RX_UDP_GD);
+ mmc->mmc_rx_udp_err += readl(mmcaddr + MMC_RX_UDP_ERR);
+ mmc->mmc_rx_tcp_gd += readl(mmcaddr + MMC_RX_TCP_GD);
+ mmc->mmc_rx_tcp_err += readl(mmcaddr + MMC_RX_TCP_ERR);
+ mmc->mmc_rx_icmp_gd += readl(mmcaddr + MMC_RX_ICMP_GD);
+ mmc->mmc_rx_icmp_err += readl(mmcaddr + MMC_RX_ICMP_ERR);
+
+ mmc->mmc_rx_udp_gd_octets += readl(mmcaddr + MMC_RX_UDP_GD_OCTETS);
+ mmc->mmc_rx_udp_err_octets += readl(mmcaddr + MMC_RX_UDP_ERR_OCTETS);
+ mmc->mmc_rx_tcp_gd_octets += readl(mmcaddr + MMC_RX_TCP_GD_OCTETS);
+ mmc->mmc_rx_tcp_err_octets += readl(mmcaddr + MMC_RX_TCP_ERR_OCTETS);
+ mmc->mmc_rx_icmp_gd_octets += readl(mmcaddr + MMC_RX_ICMP_GD_OCTETS);
+ mmc->mmc_rx_icmp_err_octets += readl(mmcaddr + MMC_RX_ICMP_ERR_OCTETS);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr += readl(mmcaddr + MMC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr += readl(mmcaddr + MMC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwmac_mmc_ops = {
+ .ctrl = dwmac_mmc_ctrl,
+ .intr_all_mask = dwmac_mmc_intr_all_mask,
+ .read = dwmac_mmc_read,
+};
+
+static void dwxgmac_mmc_ctrl(void __iomem *mmcaddr, unsigned int mode)
+{
+ u32 value = readl(mmcaddr + MMC_CNTRL);
+
+ value |= (mode & 0x3F);
+
+ writel(value, mmcaddr + MMC_CNTRL);
+}
+
+static void dwxgmac_mmc_intr_all_mask(void __iomem *mmcaddr)
+{
+ writel(0x0, mmcaddr + MMC_RX_INTR_MASK);
+ writel(0x0, mmcaddr + MMC_TX_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_TX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_FPE_INTR_MASK);
+ writel(MMC_DEFAULT_MASK, mmcaddr + MMC_XGMAC_RX_IPC_INTR_MASK);
+}
+
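+/* XGMAC exposes each MMC counter as two consecutive 32-bit registers
+ * (low word first). The helper below folds them into a u64 and, because
+ * the stmmac_counters fields are only 32-bit, saturates the destination
+ * to ~0 once the value no longer fits.
+ */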
+static void dwxgmac_read_mmc_reg(void __iomem *addr, u32 reg, u32 *dest)
+{
+ u64 tmp = 0;
+
+ tmp += readl(addr + reg);
+ tmp += ((u64)readl(addr + reg + 0x4)) << 32;
+ if (tmp > GENMASK(31, 0))
+ *dest = ~0x0;
+ else
+ *dest = *dest + tmp;
+}
+
+/* This reads the MAC core counters (if actually supported).
+ * By default the MMC core is programmed to reset each
+ * counter after a read. So all the fields of the mmc struct
+ * have to be incremented.
+ */
+static void dwxgmac_mmc_read(void __iomem *mmcaddr, struct stmmac_counters *mmc)
+{
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_GB,
+ &mmc->mmc_tx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_GB,
+ &mmc->mmc_tx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_G,
+ &mmc->mmc_tx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_G,
+ &mmc->mmc_tx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_64OCT_GB,
+ &mmc->mmc_tx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_65OCT_GB,
+ &mmc->mmc_tx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_128OCT_GB,
+ &mmc->mmc_tx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_256OCT_GB,
+ &mmc->mmc_tx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_512OCT_GB,
+ &mmc->mmc_tx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_1024OCT_GB,
+ &mmc->mmc_tx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNI_PKT_GB,
+ &mmc->mmc_tx_unicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_MULTI_PKT_GB,
+ &mmc->mmc_tx_multicast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_BROAD_PKT_GB,
+ &mmc->mmc_tx_broadcast_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_UNDER,
+ &mmc->mmc_tx_underflow_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_OCTET_G,
+ &mmc->mmc_tx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PKT_G,
+ &mmc->mmc_tx_framecount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_PAUSE,
+ &mmc->mmc_tx_pause_frame);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_TX_VLAN_PKT_G,
+ &mmc->mmc_tx_vlan_frame_g);
+
+ /* MMC RX counter registers */
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PKT_GB,
+ &mmc->mmc_rx_framecount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_GB,
+ &mmc->mmc_rx_octetcount_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_OCTET_G,
+ &mmc->mmc_rx_octetcount_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_BROAD_PKT_G,
+ &mmc->mmc_rx_broadcastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_MULTI_PKT_G,
+ &mmc->mmc_rx_multicastframe_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_CRC_ERR,
+ &mmc->mmc_rx_crc_error);
+ mmc->mmc_rx_run_error += readl(mmcaddr + MMC_XGMAC_RX_RUNT_ERR);
+ mmc->mmc_rx_jabber_error += readl(mmcaddr + MMC_XGMAC_RX_JABBER_ERR);
+ mmc->mmc_rx_undersize_g += readl(mmcaddr + MMC_XGMAC_RX_UNDER);
+ mmc->mmc_rx_oversize_g += readl(mmcaddr + MMC_XGMAC_RX_OVER);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_64OCT_GB,
+ &mmc->mmc_rx_64_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_65OCT_GB,
+ &mmc->mmc_rx_65_to_127_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_128OCT_GB,
+ &mmc->mmc_rx_128_to_255_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_256OCT_GB,
+ &mmc->mmc_rx_256_to_511_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_512OCT_GB,
+ &mmc->mmc_rx_512_to_1023_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_1024OCT_GB,
+ &mmc->mmc_rx_1024_to_max_octets_gb);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_UNI_PKT_G,
+ &mmc->mmc_rx_unicast_g);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_LENGTH_ERR,
+ &mmc->mmc_rx_length_error);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_RANGE,
+ &mmc->mmc_rx_autofrangetype);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_PAUSE,
+ &mmc->mmc_rx_pause_frames);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_FIFOOVER_PKT,
+ &mmc->mmc_rx_fifo_overflow);
+ dwxgmac_read_mmc_reg(mmcaddr, MMC_XGMAC_RX_VLAN_PKT_GB,
+ &mmc->mmc_rx_vlan_frames_gb);
+ mmc->mmc_rx_watchdog_error += readl(mmcaddr + MMC_XGMAC_RX_WATCHDOG_ERR);
+
+ mmc->mmc_tx_fpe_fragment_cntr += readl(mmcaddr + MMC_XGMAC_TX_FPE_FRAG);
+ mmc->mmc_tx_hold_req_cntr += readl(mmcaddr + MMC_XGMAC_TX_HOLD_REQ);
+ mmc->mmc_rx_packet_assembly_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_ERR);
+ mmc->mmc_rx_packet_smd_err_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_SMD_ERR);
+ mmc->mmc_rx_packet_assembly_ok_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_PKT_ASSEMBLY_OK);
+ mmc->mmc_rx_fpe_fragment_cntr +=
+ readl(mmcaddr + MMC_XGMAC_RX_FPE_FRAG);
+}
+
+const struct stmmac_mmc_ops dwxgmac_mmc_ops = {
+ .ctrl = dwxgmac_mmc_ctrl,
+ .intr_all_mask = dwxgmac_mmc_intr_all_mask,
+ .read = dwxgmac_mmc_read,
+};
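Editor's note: because the MMC block clears its counters on every read (see
dwmac_mmc_read() above), each call through the .read hook accumulates deltas
rather than reporting absolute values. A minimal sketch of that accumulation
pattern, assuming the stmmac_priv fields declared later in stmmac.h
(mmcaddr, mmc); the helper name is invented for illustration:

/* Hedged sketch, not driver code: fold the hardware deltas into the
 * software mirror. The in-tree core does this via its hwif wrappers.
 */
static void example_mmc_poll(struct stmmac_priv *priv)
{
	/* adds the counts since the previous read; HW resets to zero */
	dwmac_mmc_ops.read(priv->mmcaddr, &priv->mmc);
}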
diff --git a/drivers/net/ethernet/stmicro/stmmac/norm_desc.c b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
new file mode 100644
index 000000000..e3da4da24
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/norm_desc.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This contains the functions to handle the normal descriptors.
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/stmmac.h>
+#include "common.h"
+#include "descs_com.h"
+
+static int ndesc_get_tx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p, void __iomem *ioaddr)
+{
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+ unsigned int tdes0 = le32_to_cpu(p->des0);
+ unsigned int tdes1 = le32_to_cpu(p->des1);
+ int ret = tx_done;
+
+ /* Get tx owner first */
+ if (unlikely(tdes0 & TDES0_OWN))
+ return tx_dma_own;
+
+ /* Verify tx error by looking at the last segment. */
+ if (likely(!(tdes1 & TDES1_LAST_SEGMENT)))
+ return tx_not_ls;
+
+ if (unlikely(tdes0 & TDES0_ERROR_SUMMARY)) {
+ if (unlikely(tdes0 & TDES0_UNDERFLOW_ERROR)) {
+ x->tx_underflow++;
+ stats->tx_fifo_errors++;
+ }
+ if (unlikely(tdes0 & TDES0_NO_CARRIER)) {
+ x->tx_carrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely(tdes0 & TDES0_LOSS_CARRIER)) {
+ x->tx_losscarrier++;
+ stats->tx_carrier_errors++;
+ }
+ if (unlikely((tdes0 & TDES0_EXCESSIVE_DEFERRAL) ||
+ (tdes0 & TDES0_EXCESSIVE_COLLISIONS) ||
+ (tdes0 & TDES0_LATE_COLLISION))) {
+ unsigned int collisions;
+
+ collisions = (tdes0 & TDES0_COLLISION_COUNT_MASK) >> 3;
+ stats->collisions += collisions;
+ }
+ ret = tx_err;
+ }
+
+ if (tdes0 & TDES0_VLAN_FRAME)
+ x->tx_vlan++;
+
+ if (unlikely(tdes0 & TDES0_DEFERRED))
+ x->tx_deferred++;
+
+ return ret;
+}
+
+static int ndesc_get_tx_len(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des1) & RDES1_BUFFER1_SIZE_MASK);
+}
+
+/* This function verifies if each incoming frame has some errors
+ * and, if required, updates the multicast statistics.
+ * In case of success, it returns good_frame because the GMAC device
+ * is supposed to be able to compute the csum in HW.
+ */
+static int ndesc_get_rx_status(void *data, struct stmmac_extra_stats *x,
+ struct dma_desc *p)
+{
+ int ret = good_frame;
+ unsigned int rdes0 = le32_to_cpu(p->des0);
+ struct net_device_stats *stats = (struct net_device_stats *)data;
+
+ if (unlikely(rdes0 & RDES0_OWN))
+ return dma_own;
+
+ if (unlikely(!(rdes0 & RDES0_LAST_DESCRIPTOR))) {
+ stats->rx_length_errors++;
+ return discard_frame;
+ }
+
+ if (unlikely(rdes0 & RDES0_ERROR_SUMMARY)) {
+ if (unlikely(rdes0 & RDES0_DESCRIPTOR_ERROR))
+ x->rx_desc++;
+ if (unlikely(rdes0 & RDES0_SA_FILTER_FAIL))
+ x->sa_filter_fail++;
+ if (unlikely(rdes0 & RDES0_OVERFLOW_ERROR))
+ x->overflow_error++;
+ if (unlikely(rdes0 & RDES0_IPC_CSUM_ERROR))
+ x->ipc_csum_error++;
+ if (unlikely(rdes0 & RDES0_COLLISION)) {
+ x->rx_collision++;
+ stats->collisions++;
+ }
+ if (unlikely(rdes0 & RDES0_CRC_ERROR)) {
+ x->rx_crc_errors++;
+ stats->rx_crc_errors++;
+ }
+ ret = discard_frame;
+ }
+ if (unlikely(rdes0 & RDES0_DRIBBLING))
+ x->dribbling_bit++;
+
+ if (unlikely(rdes0 & RDES0_LENGTH_ERROR)) {
+ x->rx_length++;
+ ret = discard_frame;
+ }
+ if (unlikely(rdes0 & RDES0_MII_ERROR)) {
+ x->rx_mii++;
+ ret = discard_frame;
+ }
+#ifdef STMMAC_VLAN_TAG_USED
+ if (rdes0 & RDES0_VLAN_TAG)
+ x->vlan_tag++;
+#endif
+ return ret;
+}
+
+static void ndesc_init_rx_desc(struct dma_desc *p, int disable_rx_ic, int mode,
+ int end, int bfsize)
+{
+ int bfsize1;
+
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+
+ bfsize1 = min(bfsize, BUF_SIZE_2KiB - 1);
+ p->des1 |= cpu_to_le32(bfsize1 & RDES1_BUFFER1_SIZE_MASK);
+
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_rx_set_on_chain(p, end);
+ else
+ ndesc_rx_set_on_ring(p, end, bfsize);
+
+ if (disable_rx_ic)
+ p->des1 |= cpu_to_le32(RDES1_DISABLE_IC);
+}
+
+static void ndesc_init_tx_desc(struct dma_desc *p, int mode, int end)
+{
+ p->des0 &= cpu_to_le32(~TDES0_OWN);
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_tx_set_on_chain(p);
+ else
+ ndesc_end_tx_desc_on_ring(p, end);
+}
+
+static int ndesc_get_tx_owner(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & TDES0_OWN) >> 31;
+}
+
+static void ndesc_set_tx_owner(struct dma_desc *p)
+{
+ p->des0 |= cpu_to_le32(TDES0_OWN);
+}
+
+static void ndesc_set_rx_owner(struct dma_desc *p, int disable_rx_ic)
+{
+ p->des0 |= cpu_to_le32(RDES0_OWN);
+}
+
+static int ndesc_get_tx_ls(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des1) & TDES1_LAST_SEGMENT) >> 30;
+}
+
+static void ndesc_release_tx_desc(struct dma_desc *p, int mode)
+{
+ int ter = (le32_to_cpu(p->des1) & TDES1_END_RING) >> 25;
+
+ memset(p, 0, offsetof(struct dma_desc, des2));
+ if (mode == STMMAC_CHAIN_MODE)
+ ndesc_tx_set_on_chain(p);
+ else
+ ndesc_end_tx_desc_on_ring(p, ter);
+}
+
+static void ndesc_prepare_tx_desc(struct dma_desc *p, int is_fs, int len,
+ bool csum_flag, int mode, bool tx_own,
+ bool ls, unsigned int tot_pkt_len)
+{
+ unsigned int tdes1 = le32_to_cpu(p->des1);
+
+ if (is_fs)
+ tdes1 |= TDES1_FIRST_SEGMENT;
+ else
+ tdes1 &= ~TDES1_FIRST_SEGMENT;
+
+ if (likely(csum_flag))
+ tdes1 |= (TX_CIC_FULL) << TDES1_CHECKSUM_INSERTION_SHIFT;
+ else
+ tdes1 &= ~(TX_CIC_FULL << TDES1_CHECKSUM_INSERTION_SHIFT);
+
+ if (ls)
+ tdes1 |= TDES1_LAST_SEGMENT;
+
+ p->des1 = cpu_to_le32(tdes1);
+
+ if (mode == STMMAC_CHAIN_MODE)
+ norm_set_tx_desc_len_on_chain(p, len);
+ else
+ norm_set_tx_desc_len_on_ring(p, len);
+
+ if (tx_own)
+ p->des0 |= cpu_to_le32(TDES0_OWN);
+}
+
+static void ndesc_set_tx_ic(struct dma_desc *p)
+{
+ p->des1 |= cpu_to_le32(TDES1_INTERRUPT);
+}
+
+static int ndesc_get_rx_frame_len(struct dma_desc *p, int rx_coe_type)
+{
+ unsigned int csum = 0;
+
+ /* The type-1 checksum offload engines append the checksum at
+ * the end of frame and the two bytes of checksum are added in
+ * the length.
+ * Adjust for that in the framelen for type-1 checksum offload
+ * engines
+ */
+ if (rx_coe_type == STMMAC_RX_COE_TYPE1)
+ csum = 2;
+
+ return (((le32_to_cpu(p->des0) & RDES0_FRAME_LEN_MASK)
+ >> RDES0_FRAME_LEN_SHIFT) -
+ csum);
+}
+
+static void ndesc_enable_tx_timestamp(struct dma_desc *p)
+{
+ p->des1 |= cpu_to_le32(TDES1_TIME_STAMP_ENABLE);
+}
+
+static int ndesc_get_tx_timestamp_status(struct dma_desc *p)
+{
+ return (le32_to_cpu(p->des0) & TDES0_TIME_STAMP_STATUS) >> 17;
+}
+
+static void ndesc_get_timestamp(void *desc, u32 ats, u64 *ts)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+ u64 ns;
+
+ ns = le32_to_cpu(p->des2);
+ /* convert high/sec time stamp value to nanosecond */
+ ns += le32_to_cpu(p->des3) * 1000000000ULL;
+
+ *ts = ns;
+}
+
+static int ndesc_get_rx_timestamp_status(void *desc, void *next_desc, u32 ats)
+{
+ struct dma_desc *p = (struct dma_desc *)desc;
+
+ if ((le32_to_cpu(p->des2) == 0xffffffff) &&
+ (le32_to_cpu(p->des3) == 0xffffffff))
+ /* timestamp is corrupted, hence don't store it */
+ return 0;
+ else
+ return 1;
+}
+
+static void ndesc_display_ring(void *head, unsigned int size, bool rx,
+ dma_addr_t dma_rx_phy, unsigned int desc_size)
+{
+ struct dma_desc *p = (struct dma_desc *)head;
+ dma_addr_t dma_addr;
+ int i;
+
+ pr_info("%s descriptor ring:\n", rx ? "RX" : "TX");
+
+ for (i = 0; i < size; i++) {
+ u64 x;
+ dma_addr = dma_rx_phy + i * sizeof(*p);
+
+ x = *(u64 *)p;
+ pr_info("%03d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ (unsigned int)x, (unsigned int)(x >> 32),
+ p->des2, p->des3);
+ p++;
+ }
+ pr_info("\n");
+}
+
+static void ndesc_set_addr(struct dma_desc *p, dma_addr_t addr)
+{
+ p->des2 = cpu_to_le32(addr);
+}
+
+static void ndesc_clear(struct dma_desc *p)
+{
+ p->des2 = 0;
+}
+
+const struct stmmac_desc_ops ndesc_ops = {
+ .tx_status = ndesc_get_tx_status,
+ .rx_status = ndesc_get_rx_status,
+ .get_tx_len = ndesc_get_tx_len,
+ .init_rx_desc = ndesc_init_rx_desc,
+ .init_tx_desc = ndesc_init_tx_desc,
+ .get_tx_owner = ndesc_get_tx_owner,
+ .release_tx_desc = ndesc_release_tx_desc,
+ .prepare_tx_desc = ndesc_prepare_tx_desc,
+ .set_tx_ic = ndesc_set_tx_ic,
+ .get_tx_ls = ndesc_get_tx_ls,
+ .set_tx_owner = ndesc_set_tx_owner,
+ .set_rx_owner = ndesc_set_rx_owner,
+ .get_rx_frame_len = ndesc_get_rx_frame_len,
+ .enable_tx_timestamp = ndesc_enable_tx_timestamp,
+ .get_tx_timestamp_status = ndesc_get_tx_timestamp_status,
+ .get_timestamp = ndesc_get_timestamp,
+ .get_rx_timestamp_status = ndesc_get_rx_timestamp_status,
+ .display_ring = ndesc_display_ring,
+ .set_addr = ndesc_set_addr,
+ .clear = ndesc_clear,
+};
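Editor's note: ndesc_get_timestamp() above reassembles the hardware timestamp
from two 32-bit words, des2 (nanoseconds) and des3 (seconds). A quick worked
sketch of that arithmetic, illustrative only:

/* Sketch of the ndesc_get_timestamp() math: seconds scale to nanoseconds
 * and the sub-second part is added on top.
 */
static u64 example_ndesc_ts(u32 des2_ns, u32 des3_sec)
{
	/* des3_sec = 5, des2_ns = 1000 -> 5000001000 ns */
	return (u64)des3_sec * 1000000000ULL + des2_ns;
}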
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
new file mode 100644
index 000000000..2b5b17d8b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ Specialised functions for managing Ring mode
+
+ Copyright(C) 2011 STMicroelectronics Ltd
+
+ It defines all the functions used to handle the normal/enhanced
+ descriptors in case the DMA is configured to work in chained or
+ in ring mode.
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include "stmmac.h"
+
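+/* In ring mode, a linear frame that does not fit in one buffer is carried
+ * by des2/des3 pairs: des3 points BUF_SIZE_4KiB past des2 so both buffer
+ * pointers share a single DMA mapping, and frames above BUF_SIZE_8KiB are
+ * split across two descriptors.
+ */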
+static int jumbo_frm(void *p, struct sk_buff *skb, int csum)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)p;
+ unsigned int nopaged_len = skb_headlen(skb);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->cur_tx;
+ unsigned int bmax, len, des2;
+ struct dma_desc *desc;
+
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else
+ desc = tx_q->dma_tx + entry;
+
+ if (priv->plat->enh_desc)
+ bmax = BUF_SIZE_8KiB;
+ else
+ bmax = BUF_SIZE_2KiB;
+
+ len = nopaged_len - bmax;
+
+ if (nopaged_len > BUF_SIZE_8KiB) {
+ des2 = dma_map_single(priv->device, skb->data, bmax,
+ DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
+ return -1;
+
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = bmax;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ stmmac_prepare_tx_desc(priv, desc, 1, bmax, csum,
+ STMMAC_RING_MODE, 0, false, skb->len);
+ tx_q->tx_skbuff[entry] = NULL;
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+
+ if (priv->extend_desc)
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else
+ desc = tx_q->dma_tx + entry;
+
+ des2 = dma_map_single(priv->device, skb->data + bmax, len,
+ DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
+ return -1;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum,
+ STMMAC_RING_MODE, 1, !skb_is_nonlinear(skb),
+ skb->len);
+ } else {
+ des2 = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ desc->des2 = cpu_to_le32(des2);
+ if (dma_mapping_error(priv->device, des2))
+ return -1;
+ tx_q->tx_skbuff_dma[entry].buf = des2;
+ tx_q->tx_skbuff_dma[entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = true;
+ desc->des3 = cpu_to_le32(des2 + BUF_SIZE_4KiB);
+ stmmac_prepare_tx_desc(priv, desc, 1, nopaged_len, csum,
+ STMMAC_RING_MODE, 0, !skb_is_nonlinear(skb),
+ skb->len);
+ }
+
+ tx_q->cur_tx = entry;
+
+ return entry;
+}
+
+static unsigned int is_jumbo_frm(int len, int enh_desc)
+{
+ unsigned int ret = 0;
+
+ if (len >= BUF_SIZE_4KiB)
+ ret = 1;
+
+ return ret;
+}
+
+static void refill_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_rx_queue *rx_q = priv_ptr;
+ struct stmmac_priv *priv = rx_q->priv_data;
+
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_conf.dma_buf_sz == BUF_SIZE_16KiB)
+ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
+}
+
+/* In ring mode we need to fill the desc3 because it is used as buffer */
+static void init_desc3(struct dma_desc *p)
+{
+ p->des3 = cpu_to_le32(le32_to_cpu(p->des2) + BUF_SIZE_8KiB);
+}
+
+static void clean_desc3(void *priv_ptr, struct dma_desc *p)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)priv_ptr;
+ struct stmmac_priv *priv = tx_q->priv_data;
+ unsigned int entry = tx_q->dirty_tx;
+
+ /* des3 is only used for jumbo frames tx or time stamping */
+ if (unlikely(tx_q->tx_skbuff_dma[entry].is_jumbo ||
+ (tx_q->tx_skbuff_dma[entry].last_segment &&
+ !priv->extend_desc && priv->hwts_tx_en)))
+ p->des3 = 0;
+}
+
+static int set_16kib_bfsize(int mtu)
+{
+ int ret = 0;
+
+ if (unlikely(mtu > BUF_SIZE_8KiB))
+ ret = BUF_SIZE_16KiB;
+ return ret;
+}
+
+const struct stmmac_mode_ops ring_mode_ops = {
+ .is_jumbo_frm = is_jumbo_frm,
+ .jumbo_frm = jumbo_frm,
+ .refill_desc3 = refill_desc3,
+ .init_desc3 = init_desc3,
+ .clean_desc3 = clean_desc3,
+ .set_16kib_bfsize = set_16kib_bfsize,
+};
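Editor's note: the split that jumbo_frm() performs is easiest to see as plain
arithmetic. A hedged sketch with symbolic sizes (BUF_SIZE_8KiB and
BUF_SIZE_4KiB come from common.h; the helper name is invented for
illustration):

/* Sketch: how jumbo_frm() sizes the two descriptors for a linear frame
 * longer than BUF_SIZE_8KiB when enhanced descriptors are in use.
 */
static void example_jumbo_split(unsigned int nopaged_len, unsigned int bmax)
{
	unsigned int first = bmax;			/* first descriptor */
	unsigned int second = nopaged_len - bmax;	/* second descriptor */

	/* des3 of each descriptor points BUF_SIZE_4KiB past its des2,
	 * so one DMA mapping backs both buffer pointers.
	 */
	(void)first;
	(void)second;
}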
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
new file mode 100644
index 000000000..46944c02b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_H__
+#define __STMMAC_H__
+
+#define STMMAC_RESOURCE_NAME "stmmaceth"
+
+#include <linux/clk.h>
+#include <linux/hrtimer.h>
+#include <linux/if_vlan.h>
+#include <linux/stmmac.h>
+#include <linux/phylink.h>
+#include <linux/pci.h>
+#include "common.h"
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/reset.h>
+#include <net/page_pool.h>
+#include <uapi/linux/bpf.h>
+
+struct stmmac_resources {
+ void __iomem *addr;
+ u8 mac[ETH_ALEN];
+ int wol_irq;
+ int lpi_irq;
+ int irq;
+ int sfty_ce_irq;
+ int sfty_ue_irq;
+ int rx_irq[MTL_MAX_RX_QUEUES];
+ int tx_irq[MTL_MAX_TX_QUEUES];
+};
+
+enum stmmac_txbuf_type {
+ STMMAC_TXBUF_T_SKB,
+ STMMAC_TXBUF_T_XDP_TX,
+ STMMAC_TXBUF_T_XDP_NDO,
+ STMMAC_TXBUF_T_XSK_TX,
+};
+
+struct stmmac_tx_info {
+ dma_addr_t buf;
+ bool map_as_page;
+ unsigned len;
+ bool last_segment;
+ bool is_jumbo;
+ enum stmmac_txbuf_type buf_type;
+};
+
+#define STMMAC_TBS_AVAIL BIT(0)
+#define STMMAC_TBS_EN BIT(1)
+
+/* Frequently used values are kept adjacent for cache effect */
+struct stmmac_tx_queue {
+ u32 tx_count_frames;
+ int tbs;
+ struct hrtimer txtimer;
+ u32 queue_index;
+ struct stmmac_priv *priv_data;
+ struct dma_extended_desc *dma_etx ____cacheline_aligned_in_smp;
+ struct dma_edesc *dma_entx;
+ struct dma_desc *dma_tx;
+ union {
+ struct sk_buff **tx_skbuff;
+ struct xdp_frame **xdpf;
+ };
+ struct stmmac_tx_info *tx_skbuff_dma;
+ struct xsk_buff_pool *xsk_pool;
+ u32 xsk_frames_done;
+ unsigned int cur_tx;
+ unsigned int dirty_tx;
+ dma_addr_t dma_tx_phy;
+ dma_addr_t tx_tail_addr;
+ u32 mss;
+};
+
+struct stmmac_rx_buffer {
+ union {
+ struct {
+ struct page *page;
+ dma_addr_t addr;
+ __u32 page_offset;
+ };
+ struct xdp_buff *xdp;
+ };
+ struct page *sec_page;
+ dma_addr_t sec_addr;
+};
+
+struct stmmac_rx_queue {
+ u32 rx_count_frames;
+ u32 queue_index;
+ struct xdp_rxq_info xdp_rxq;
+ struct xsk_buff_pool *xsk_pool;
+ struct page_pool *page_pool;
+ struct stmmac_rx_buffer *buf_pool;
+ struct stmmac_priv *priv_data;
+ struct dma_extended_desc *dma_erx;
+ struct dma_desc *dma_rx ____cacheline_aligned_in_smp;
+ unsigned int cur_rx;
+ unsigned int dirty_rx;
+ unsigned int buf_alloc_num;
+ u32 rx_zeroc_thresh;
+ dma_addr_t dma_rx_phy;
+ u32 rx_tail_addr;
+ unsigned int state_saved;
+ struct {
+ struct sk_buff *skb;
+ unsigned int len;
+ unsigned int error;
+ } state;
+};
+
+struct stmmac_channel {
+ struct napi_struct rx_napi ____cacheline_aligned_in_smp;
+ struct napi_struct tx_napi ____cacheline_aligned_in_smp;
+ struct napi_struct rxtx_napi ____cacheline_aligned_in_smp;
+ struct stmmac_priv *priv_data;
+ spinlock_t lock;
+ u32 index;
+};
+
+struct stmmac_tc_entry {
+ bool in_use;
+ bool in_hw;
+ bool is_last;
+ bool is_frag;
+ void *frag_ptr;
+ unsigned int table_pos;
+ u32 handle;
+ u32 prio;
+ struct {
+ u32 match_data;
+ u32 match_en;
+ u8 af:1;
+ u8 rf:1;
+ u8 im:1;
+ u8 nc:1;
+ u8 res1:4;
+ u8 frame_offset;
+ u8 ok_index;
+ u8 dma_ch_no;
+ u32 res2;
+ } __packed val;
+};
+
+#define STMMAC_PPS_MAX 4
+struct stmmac_pps_cfg {
+ bool available;
+ struct timespec64 start;
+ struct timespec64 period;
+};
+
+struct stmmac_rss {
+ int enable;
+ u8 key[STMMAC_RSS_HASH_KEY_SIZE];
+ u32 table[STMMAC_RSS_MAX_TABLE_SIZE];
+};
+
+#define STMMAC_FLOW_ACTION_DROP BIT(0)
+struct stmmac_flow_entry {
+ unsigned long cookie;
+ unsigned long action;
+ u8 ip_proto;
+ int in_use;
+ int idx;
+ int is_l4;
+};
+
+/* Rx Frame Steering */
+enum stmmac_rfs_type {
+ STMMAC_RFS_T_VLAN,
+ STMMAC_RFS_T_LLDP,
+ STMMAC_RFS_T_1588,
+ STMMAC_RFS_T_MAX,
+};
+
+struct stmmac_rfs_entry {
+ unsigned long cookie;
+ u16 etype;
+ int in_use;
+ int type;
+ int tc;
+};
+
+struct stmmac_dma_conf {
+ unsigned int dma_buf_sz;
+
+ /* RX Queue */
+ struct stmmac_rx_queue rx_queue[MTL_MAX_RX_QUEUES];
+ unsigned int dma_rx_size;
+
+ /* TX Queue */
+ struct stmmac_tx_queue tx_queue[MTL_MAX_TX_QUEUES];
+ unsigned int dma_tx_size;
+};
+
+struct stmmac_priv {
+ /* Frequently used values are kept adjacent for cache effect */
+ u32 tx_coal_frames[MTL_MAX_TX_QUEUES];
+ u32 tx_coal_timer[MTL_MAX_TX_QUEUES];
+ u32 rx_coal_frames[MTL_MAX_TX_QUEUES];
+
+ int hwts_tx_en;
+ bool tx_path_in_lpi_mode;
+ bool tso;
+ int sph;
+ int sph_cap;
+ u32 sarc_type;
+
+ unsigned int rx_copybreak;
+ u32 rx_riwt[MTL_MAX_TX_QUEUES];
+ int hwts_rx_en;
+
+ void __iomem *ioaddr;
+ struct net_device *dev;
+ struct device *device;
+ struct mac_device_info *hw;
+ int (*hwif_quirks)(struct stmmac_priv *priv);
+ struct mutex lock;
+
+ struct stmmac_dma_conf dma_conf;
+
+ /* Generic channel for NAPI */
+ struct stmmac_channel channel[STMMAC_CH_MAX];
+
+ int speed;
+ unsigned int flow_ctrl;
+ unsigned int pause;
+ struct mii_bus *mii;
+
+ struct phylink_config phylink_config;
+ struct phylink *phylink;
+
+ struct stmmac_extra_stats xstats ____cacheline_aligned_in_smp;
+ struct stmmac_safety_stats sstats;
+ struct plat_stmmacenet_data *plat;
+ struct dma_features dma_cap;
+ struct stmmac_counters mmc;
+ int hw_cap_support;
+ int synopsys_id;
+ u32 msg_enable;
+ int wolopts;
+ int wol_irq;
+ bool wol_irq_disabled;
+ int clk_csr;
+ struct timer_list eee_ctrl_timer;
+ int lpi_irq;
+ int eee_enabled;
+ int eee_active;
+ int tx_lpi_timer;
+ int tx_lpi_enabled;
+ int eee_tw_timer;
+ bool eee_sw_timer_en;
+ unsigned int mode;
+ unsigned int chain_mode;
+ int extend_desc;
+ struct hwtstamp_config tstamp_config;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_clock_ops;
+ unsigned int default_addend;
+ u32 sub_second_inc;
+ u32 systime_flags;
+ u32 adv_ts;
+ int use_riwt;
+ int irq_wake;
+ rwlock_t ptp_lock;
+ /* Protects auxiliary snapshot registers from concurrent access. */
+ struct mutex aux_ts_lock;
+ wait_queue_head_t tstamp_busy_wait;
+
+ void __iomem *mmcaddr;
+ void __iomem *ptpaddr;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ int sfty_ce_irq;
+ int sfty_ue_irq;
+ int rx_irq[MTL_MAX_RX_QUEUES];
+ int tx_irq[MTL_MAX_TX_QUEUES];
+ /* IRQ names */
+ char int_name_mac[IFNAMSIZ + 9];
+ char int_name_wol[IFNAMSIZ + 9];
+ char int_name_lpi[IFNAMSIZ + 9];
+ char int_name_sfty_ce[IFNAMSIZ + 10];
+ char int_name_sfty_ue[IFNAMSIZ + 10];
+ char int_name_rx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 14];
+ char int_name_tx_irq[MTL_MAX_TX_QUEUES][IFNAMSIZ + 18];
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+#endif
+
+ unsigned long state;
+ struct workqueue_struct *wq;
+ struct work_struct service_task;
+
+ /* Workqueue for handling FPE hand-shaking */
+ unsigned long fpe_task_state;
+ struct workqueue_struct *fpe_wq;
+ struct work_struct fpe_task;
+ char wq_name[IFNAMSIZ + 4];
+
+ /* TC Handling */
+ unsigned int tc_entries_max;
+ unsigned int tc_off_max;
+ struct stmmac_tc_entry *tc_entries;
+ unsigned int flow_entries_max;
+ struct stmmac_flow_entry *flow_entries;
+ unsigned int rfs_entries_max[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_cnt[STMMAC_RFS_T_MAX];
+ unsigned int rfs_entries_total;
+ struct stmmac_rfs_entry *rfs_entries;
+
+ /* Pulse Per Second output */
+ struct stmmac_pps_cfg pps[STMMAC_PPS_MAX];
+
+ /* Receive Side Scaling */
+ struct stmmac_rss rss;
+
+ /* XDP BPF Program */
+ unsigned long *af_xdp_zc_qps;
+ struct bpf_prog *xdp_prog;
+};
+
+enum stmmac_state {
+ STMMAC_DOWN,
+ STMMAC_RESET_REQUESTED,
+ STMMAC_RESETING,
+ STMMAC_SERVICE_SCHED,
+};
+
+int stmmac_mdio_unregister(struct net_device *ndev);
+int stmmac_mdio_register(struct net_device *ndev);
+int stmmac_mdio_reset(struct mii_bus *mii);
+int stmmac_xpcs_setup(struct mii_bus *mii);
+void stmmac_set_ethtool_ops(struct net_device *netdev);
+
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags);
+void stmmac_ptp_register(struct stmmac_priv *priv);
+void stmmac_ptp_unregister(struct stmmac_priv *priv);
+int stmmac_xdp_open(struct net_device *dev);
+void stmmac_xdp_release(struct net_device *dev);
+int stmmac_resume(struct device *dev);
+int stmmac_suspend(struct device *dev);
+int stmmac_dvr_remove(struct device *dev);
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res);
+void stmmac_disable_eee_mode(struct stmmac_priv *priv);
+bool stmmac_eee_init(struct stmmac_priv *priv);
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt);
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size);
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled);
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable);
+
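+/* XDP state is tracked simply by the presence of an attached program; when
+ * one is present, RX buffers reserve XDP_PACKET_HEADROOM ahead of the frame.
+ */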
+static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv)
+{
+ return !!priv->xdp_prog;
+}
+
+static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
+{
+ if (stmmac_xdp_is_enabled(priv))
+ return XDP_PACKET_HEADROOM;
+
+ return 0;
+}
+
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue);
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue);
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags);
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time);
+
+#if IS_ENABLED(CONFIG_STMMAC_SELFTESTS)
+void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf);
+void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data);
+int stmmac_selftest_get_count(struct stmmac_priv *priv);
+#else
+static inline void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ /* Not enabled */
+}
+static inline void stmmac_selftest_get_strings(struct stmmac_priv *priv,
+ u8 *data)
+{
+ /* Not enabled */
+}
+static inline int stmmac_selftest_get_count(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_STMMAC_SELFTESTS */
+
+#endif /* __STMMAC_H__ */
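Editor's note: stmmac_resources plus stmmac_dvr_probe() is the entry point
the dwmac-* glue drivers build on. A minimal sketch of that flow, with error
handling elided and plat_dat assumed to be populated by the caller (the
function name and the shared-IRQ choice are illustrative, not an in-tree
driver):

/* Hedged sketch of a glue-driver probe using the declarations above. */
static int example_glue_probe(struct device *dev, void __iomem *base,
			      int irq, struct plat_stmmacenet_data *plat_dat)
{
	struct stmmac_resources res = {};

	res.addr = base;	/* mapped MAC register window */
	res.irq = irq;		/* main MAC interrupt */
	res.wol_irq = irq;	/* often shared with the main IRQ */

	return stmmac_dvr_probe(dev, plat_dat, &res);
}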
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
new file mode 100644
index 000000000..f03aa8a0b
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -0,0 +1,1202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ STMMAC Ethtool support
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/interrupt.h>
+#include <linux/mii.h>
+#include <linux/phylink.h>
+#include <linux/net_tstamp.h>
+#include <asm/io.h>
+
+#include "stmmac.h"
+#include "dwmac_dma.h"
+#include "dwxgmac2.h"
+
+#define REG_SPACE_SIZE 0x1060
+#define GMAC4_REG_SPACE_SIZE 0x116C
+#define MAC100_ETHTOOL_NAME "st_mac100"
+#define GMAC_ETHTOOL_NAME "st_gmac"
+#define XGMAC_ETHTOOL_NAME "st_xgmac"
+
+/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h
+ *
+ * It is here because dwmac_dma.h and dwmac4_dma.h cannot be included at the
+ * same time due to the conflicting macro names.
+ */
+#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100
+
+#define ETHTOOL_DMA_OFFSET 55
+
+struct stmmac_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int sizeof_stat;
+ int stat_offset;
+};
+
+#define STMMAC_STAT(m) \
+ { #m, sizeof_field(struct stmmac_extra_stats, m), \
+ offsetof(struct stmmac_priv, xstats.m)}
+
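+/* Recording the field size lets stmmac_get_ethtool_stats() copy u64 and
+ * u32 counters correctly from the same table.
+ */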
+static const struct stmmac_stats stmmac_gstrings_stats[] = {
+ /* Transmit errors */
+ STMMAC_STAT(tx_underflow),
+ STMMAC_STAT(tx_carrier),
+ STMMAC_STAT(tx_losscarrier),
+ STMMAC_STAT(vlan_tag),
+ STMMAC_STAT(tx_deferred),
+ STMMAC_STAT(tx_vlan),
+ STMMAC_STAT(tx_jabber),
+ STMMAC_STAT(tx_frame_flushed),
+ STMMAC_STAT(tx_payload_error),
+ STMMAC_STAT(tx_ip_header_error),
+ /* Receive errors */
+ STMMAC_STAT(rx_desc),
+ STMMAC_STAT(sa_filter_fail),
+ STMMAC_STAT(overflow_error),
+ STMMAC_STAT(ipc_csum_error),
+ STMMAC_STAT(rx_collision),
+ STMMAC_STAT(rx_crc_errors),
+ STMMAC_STAT(dribbling_bit),
+ STMMAC_STAT(rx_length),
+ STMMAC_STAT(rx_mii),
+ STMMAC_STAT(rx_multicast),
+ STMMAC_STAT(rx_gmac_overflow),
+ STMMAC_STAT(rx_watchdog),
+ STMMAC_STAT(da_rx_filter_fail),
+ STMMAC_STAT(sa_rx_filter_fail),
+ STMMAC_STAT(rx_missed_cntr),
+ STMMAC_STAT(rx_overflow_cntr),
+ STMMAC_STAT(rx_vlan),
+ STMMAC_STAT(rx_split_hdr_pkt_n),
+ /* Tx/Rx IRQ error info */
+ STMMAC_STAT(tx_undeflow_irq),
+ STMMAC_STAT(tx_process_stopped_irq),
+ STMMAC_STAT(tx_jabber_irq),
+ STMMAC_STAT(rx_overflow_irq),
+ STMMAC_STAT(rx_buf_unav_irq),
+ STMMAC_STAT(rx_process_stopped_irq),
+ STMMAC_STAT(rx_watchdog_irq),
+ STMMAC_STAT(tx_early_irq),
+ STMMAC_STAT(fatal_bus_error_irq),
+ /* Tx/Rx IRQ Events */
+ STMMAC_STAT(rx_early_irq),
+ STMMAC_STAT(threshold),
+ STMMAC_STAT(tx_pkt_n),
+ STMMAC_STAT(rx_pkt_n),
+ STMMAC_STAT(normal_irq_n),
+ STMMAC_STAT(rx_normal_irq_n),
+ STMMAC_STAT(napi_poll),
+ STMMAC_STAT(tx_normal_irq_n),
+ STMMAC_STAT(tx_clean),
+ STMMAC_STAT(tx_set_ic_bit),
+ STMMAC_STAT(irq_receive_pmt_irq_n),
+ /* MMC info */
+ STMMAC_STAT(mmc_tx_irq_n),
+ STMMAC_STAT(mmc_rx_irq_n),
+ STMMAC_STAT(mmc_rx_csum_offload_irq_n),
+ /* EEE */
+ STMMAC_STAT(irq_tx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_tx_path_exit_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_in_lpi_mode_n),
+ STMMAC_STAT(irq_rx_path_exit_lpi_mode_n),
+ STMMAC_STAT(phy_eee_wakeup_error_n),
+ /* Extended RDES status */
+ STMMAC_STAT(ip_hdr_err),
+ STMMAC_STAT(ip_payload_err),
+ STMMAC_STAT(ip_csum_bypassed),
+ STMMAC_STAT(ipv4_pkt_rcvd),
+ STMMAC_STAT(ipv6_pkt_rcvd),
+ STMMAC_STAT(no_ptp_rx_msg_type_ext),
+ STMMAC_STAT(ptp_rx_msg_type_sync),
+ STMMAC_STAT(ptp_rx_msg_type_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_delay_req),
+ STMMAC_STAT(ptp_rx_msg_type_delay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_req),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_resp),
+ STMMAC_STAT(ptp_rx_msg_type_pdelay_follow_up),
+ STMMAC_STAT(ptp_rx_msg_type_announce),
+ STMMAC_STAT(ptp_rx_msg_type_management),
+ STMMAC_STAT(ptp_rx_msg_pkt_reserved_type),
+ STMMAC_STAT(ptp_frame_type),
+ STMMAC_STAT(ptp_ver),
+ STMMAC_STAT(timestamp_dropped),
+ STMMAC_STAT(av_pkt_rcvd),
+ STMMAC_STAT(av_tagged_pkt_rcvd),
+ STMMAC_STAT(vlan_tag_priority_val),
+ STMMAC_STAT(l3_filter_match),
+ STMMAC_STAT(l4_filter_match),
+ STMMAC_STAT(l3_l4_filter_no_match),
+ /* PCS */
+ STMMAC_STAT(irq_pcs_ane_n),
+ STMMAC_STAT(irq_pcs_link_n),
+ STMMAC_STAT(irq_rgmii_n),
+ /* DEBUG */
+ STMMAC_STAT(mtl_tx_status_fifo_full),
+ STMMAC_STAT(mtl_tx_fifo_not_empty),
+ STMMAC_STAT(mmtl_fifo_ctrl),
+ STMMAC_STAT(mtl_tx_fifo_read_ctrl_write),
+ STMMAC_STAT(mtl_tx_fifo_read_ctrl_wait),
+ STMMAC_STAT(mtl_tx_fifo_read_ctrl_read),
+ STMMAC_STAT(mtl_tx_fifo_read_ctrl_idle),
+ STMMAC_STAT(mac_tx_in_pause),
+ STMMAC_STAT(mac_tx_frame_ctrl_xfer),
+ STMMAC_STAT(mac_tx_frame_ctrl_idle),
+ STMMAC_STAT(mac_tx_frame_ctrl_wait),
+ STMMAC_STAT(mac_tx_frame_ctrl_pause),
+ STMMAC_STAT(mac_gmii_tx_proto_engine),
+ STMMAC_STAT(mtl_rx_fifo_fill_level_full),
+ STMMAC_STAT(mtl_rx_fifo_fill_above_thresh),
+ STMMAC_STAT(mtl_rx_fifo_fill_below_thresh),
+ STMMAC_STAT(mtl_rx_fifo_fill_level_empty),
+ STMMAC_STAT(mtl_rx_fifo_read_ctrl_flush),
+ STMMAC_STAT(mtl_rx_fifo_read_ctrl_read_data),
+ STMMAC_STAT(mtl_rx_fifo_read_ctrl_status),
+ STMMAC_STAT(mtl_rx_fifo_read_ctrl_idle),
+ STMMAC_STAT(mtl_rx_fifo_ctrl_active),
+ STMMAC_STAT(mac_rx_frame_ctrl_fifo),
+ STMMAC_STAT(mac_gmii_rx_proto_engine),
+ /* TSO */
+ STMMAC_STAT(tx_tso_frames),
+ STMMAC_STAT(tx_tso_nfrags),
+ /* EST */
+ STMMAC_STAT(mtl_est_cgce),
+ STMMAC_STAT(mtl_est_hlbs),
+ STMMAC_STAT(mtl_est_hlbf),
+ STMMAC_STAT(mtl_est_btre),
+ STMMAC_STAT(mtl_est_btrlm),
+};
+#define STMMAC_STATS_LEN ARRAY_SIZE(stmmac_gstrings_stats)
+
+/* HW MAC Management counters (if supported) */
+#define STMMAC_MMC_STAT(m) \
+ { #m, sizeof_field(struct stmmac_counters, m), \
+ offsetof(struct stmmac_priv, mmc.m)}
+
+static const struct stmmac_stats stmmac_mmc[] = {
+ STMMAC_MMC_STAT(mmc_tx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_tx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_tx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_tx_unicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_multicast_gb),
+ STMMAC_MMC_STAT(mmc_tx_broadcast_gb),
+ STMMAC_MMC_STAT(mmc_tx_underflow_error),
+ STMMAC_MMC_STAT(mmc_tx_singlecol_g),
+ STMMAC_MMC_STAT(mmc_tx_multicol_g),
+ STMMAC_MMC_STAT(mmc_tx_deferred),
+ STMMAC_MMC_STAT(mmc_tx_latecol),
+ STMMAC_MMC_STAT(mmc_tx_exesscol),
+ STMMAC_MMC_STAT(mmc_tx_carrier_error),
+ STMMAC_MMC_STAT(mmc_tx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_tx_framecount_g),
+ STMMAC_MMC_STAT(mmc_tx_excessdef),
+ STMMAC_MMC_STAT(mmc_tx_pause_frame),
+ STMMAC_MMC_STAT(mmc_tx_vlan_frame_g),
+ STMMAC_MMC_STAT(mmc_rx_framecount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_gb),
+ STMMAC_MMC_STAT(mmc_rx_octetcount_g),
+ STMMAC_MMC_STAT(mmc_rx_broadcastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_multicastframe_g),
+ STMMAC_MMC_STAT(mmc_rx_crc_error),
+ STMMAC_MMC_STAT(mmc_rx_align_error),
+ STMMAC_MMC_STAT(mmc_rx_run_error),
+ STMMAC_MMC_STAT(mmc_rx_jabber_error),
+ STMMAC_MMC_STAT(mmc_rx_undersize_g),
+ STMMAC_MMC_STAT(mmc_rx_oversize_g),
+ STMMAC_MMC_STAT(mmc_rx_64_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_65_to_127_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_128_to_255_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_256_to_511_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_512_to_1023_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_1024_to_max_octets_gb),
+ STMMAC_MMC_STAT(mmc_rx_unicast_g),
+ STMMAC_MMC_STAT(mmc_rx_length_error),
+ STMMAC_MMC_STAT(mmc_rx_autofrangetype),
+ STMMAC_MMC_STAT(mmc_rx_pause_frames),
+ STMMAC_MMC_STAT(mmc_rx_fifo_overflow),
+ STMMAC_MMC_STAT(mmc_rx_vlan_frames_gb),
+ STMMAC_MMC_STAT(mmc_rx_watchdog_error),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr_mask),
+ STMMAC_MMC_STAT(mmc_rx_ipc_intr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_frag_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv4_udsbl_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay_octets),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_gd),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_hderr),
+ STMMAC_MMC_STAT(mmc_rx_ipv6_nopay),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd),
+ STMMAC_MMC_STAT(mmc_rx_udp_err),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err),
+ STMMAC_MMC_STAT(mmc_rx_udp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_udp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_tcp_err_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_gd_octets),
+ STMMAC_MMC_STAT(mmc_rx_icmp_err_octets),
+ STMMAC_MMC_STAT(mmc_tx_fpe_fragment_cntr),
+ STMMAC_MMC_STAT(mmc_tx_hold_req_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_smd_err_cntr),
+ STMMAC_MMC_STAT(mmc_rx_packet_assembly_ok_cntr),
+ STMMAC_MMC_STAT(mmc_rx_fpe_fragment_cntr),
+};
+#define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc)
+
+static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = {
+ "tx_pkt_n",
+ "tx_irq_n",
+#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string)
+};
+
+static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = {
+ "rx_pkt_n",
+ "rx_irq_n",
+#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string)
+};
+
+static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_gmac || priv->plat->has_gmac4)
+ strscpy(info->driver, GMAC_ETHTOOL_NAME, sizeof(info->driver));
+ else if (priv->plat->has_xgmac)
+ strscpy(info->driver, XGMAC_ETHTOOL_NAME, sizeof(info->driver));
+ else
+ strscpy(info->driver, MAC100_ETHTOOL_NAME,
+ sizeof(info->driver));
+
+ if (priv->plat->pdev) {
+ strscpy(info->bus_info, pci_name(priv->plat->pdev),
+ sizeof(info->bus_info));
+ }
+}
+
+static int stmmac_ethtool_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
+ struct rgmii_adv adv;
+ u32 supported, advertising, lp_advertising;
+
+ if (!priv->xstats.pcs_link) {
+ cmd->base.speed = SPEED_UNKNOWN;
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ return 0;
+ }
+ cmd->base.duplex = priv->xstats.pcs_duplex;
+
+ cmd->base.speed = priv->xstats.pcs_speed;
+
+ /* Get and convert ADV/LP_ADV from the HW AN registers */
+ if (stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv))
+ return -EOPNOTSUPP; /* should never happen */
+
+ /* Encoding of PSE bits is defined in 802.3z, 37.2.1.4 */
+
+ ethtool_convert_link_mode_to_legacy_u32(
+ &supported, cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &advertising, cmd->link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(
+ &lp_advertising, cmd->link_modes.lp_advertising);
+
+ if (adv.pause & STMMAC_PCS_PAUSE)
+ advertising |= ADVERTISED_Pause;
+ if (adv.pause & STMMAC_PCS_ASYM_PAUSE)
+ advertising |= ADVERTISED_Asym_Pause;
+ if (adv.lp_pause & STMMAC_PCS_PAUSE)
+ lp_advertising |= ADVERTISED_Pause;
+ if (adv.lp_pause & STMMAC_PCS_ASYM_PAUSE)
+ lp_advertising |= ADVERTISED_Asym_Pause;
+
+ /* Reg49[3] always set because ANE is always supported */
+ cmd->base.autoneg = AUTONEG_ENABLE;
+ supported |= SUPPORTED_Autoneg;
+ advertising |= ADVERTISED_Autoneg;
+ lp_advertising |= ADVERTISED_Autoneg;
+
+ if (adv.duplex) {
+ supported |= (SUPPORTED_1000baseT_Full |
+ SUPPORTED_100baseT_Full |
+ SUPPORTED_10baseT_Full);
+ advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ } else {
+ supported |= (SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+ advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ }
+ if (adv.lp_duplex)
+ lp_advertising |= (ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Full);
+ else
+ lp_advertising |= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_10baseT_Half);
+ cmd->base.port = PORT_OTHER;
+
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.supported, supported);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.advertising, advertising);
+ ethtool_convert_legacy_u32_to_link_mode(
+ cmd->link_modes.lp_advertising, lp_advertising);
+
+ return 0;
+ }
+
+ return phylink_ethtool_ksettings_get(priv->phylink, cmd);
+}
+
+static int
+stmmac_ethtool_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->hw->pcs & STMMAC_PCS_RGMII ||
+ priv->hw->pcs & STMMAC_PCS_SGMII) {
+ u32 mask = ADVERTISED_Autoneg | ADVERTISED_Pause;
+
+ /* Only support ANE */
+ if (cmd->base.autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ mask &= (ADVERTISED_1000baseT_Half |
+ ADVERTISED_1000baseT_Full |
+ ADVERTISED_100baseT_Half |
+ ADVERTISED_100baseT_Full |
+ ADVERTISED_10baseT_Half |
+ ADVERTISED_10baseT_Full);
+
+ mutex_lock(&priv->lock);
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+ mutex_unlock(&priv->lock);
+
+ return 0;
+ }
+
+ return phylink_ethtool_ksettings_set(priv->phylink, cmd);
+}
+
+static u32 stmmac_ethtool_getmsglevel(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void stmmac_ethtool_setmsglevel(struct net_device *dev, u32 level)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = level;
+}
+
+static int stmmac_check_if_running(struct net_device *dev)
+{
+ if (!netif_running(dev))
+ return -EBUSY;
+ return 0;
+}
+
+static int stmmac_ethtool_get_regs_len(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->has_xgmac)
+ return XGMAC_REGSIZE * 4;
+ else if (priv->plat->has_gmac4)
+ return GMAC4_REG_SPACE_SIZE;
+ return REG_SPACE_SIZE;
+}
+
+static void stmmac_ethtool_gregs(struct net_device *dev,
+ struct ethtool_regs *regs, void *space)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 *reg_space = (u32 *) space;
+
+ stmmac_dump_mac_regs(priv, priv->hw, reg_space);
+ stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space);
+
+ /* Copy DMA registers to where ethtool expects them */
+ if (priv->plat->has_gmac4) {
+ /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[GMAC4_DMA_CHAN_BASE_ADDR / 4],
+ NUM_DWMAC4_DMA_REGS * 4);
+ } else if (!priv->plat->has_xgmac) {
+ memcpy(&reg_space[ETHTOOL_DMA_OFFSET],
+ &reg_space[DMA_BUS_MODE / 4],
+ NUM_DWMAC1000_DMA_REGS * 4);
+ }
+}
+
+static int stmmac_nway_reset(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return phylink_ethtool_nway_reset(priv->phylink);
+}
+
+static void stmmac_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ ring->rx_max_pending = DMA_MAX_RX_SIZE;
+ ring->tx_max_pending = DMA_MAX_TX_SIZE;
+ ring->rx_pending = priv->dma_conf.dma_rx_size;
+ ring->tx_pending = priv->dma_conf.dma_tx_size;
+}
+
+static int stmmac_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending ||
+ ring->rx_pending < DMA_MIN_RX_SIZE ||
+ ring->rx_pending > DMA_MAX_RX_SIZE ||
+ !is_power_of_2(ring->rx_pending) ||
+ ring->tx_pending < DMA_MIN_TX_SIZE ||
+ ring->tx_pending > DMA_MAX_TX_SIZE ||
+ !is_power_of_2(ring->tx_pending))
+ return -EINVAL;
+
+ return stmmac_reinit_ringparam(netdev, ring->rx_pending,
+ ring->tx_pending);
+}
+
+static void
+stmmac_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct rgmii_adv adv_lp;
+
+ if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
+ pause->autoneg = 1;
+ if (!adv_lp.pause)
+ return;
+ } else {
+ phylink_ethtool_get_pauseparam(priv->phylink, pause);
+ }
+}
+
+static int
+stmmac_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ struct rgmii_adv adv_lp;
+
+ if (priv->hw->pcs && !stmmac_pcs_get_adv_lp(priv, priv->ioaddr, &adv_lp)) {
+ pause->autoneg = 1;
+ if (!adv_lp.pause)
+ return -EOPNOTSUPP;
+ return 0;
+ } else {
+ return phylink_ethtool_set_pauseparam(priv->phylink, pause);
+ }
+}
+
+static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ int q, stat;
+ char *p;
+
+ for (q = 0; q < tx_cnt; q++) {
+ p = (char *)priv + offsetof(struct stmmac_priv,
+ xstats.txq_stats[q].tx_pkt_n);
+ for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+ *data++ = (*(unsigned long *)p);
+ p += sizeof(unsigned long);
+ }
+ }
+ for (q = 0; q < rx_cnt; q++) {
+ p = (char *)priv + offsetof(struct stmmac_priv,
+ xstats.rxq_stats[q].rx_pkt_n);
+ for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+ *data++ = (*(unsigned long *)p);
+ p += sizeof(unsigned long);
+ }
+ }
+}
+
+static void stmmac_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *dummy, u64 *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ unsigned long count;
+ int i, j = 0, ret;
+
+ if (priv->dma_cap.asp) {
+ for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
+ if (!stmmac_safety_feat_dump(priv, &priv->sstats, i,
+ &count, NULL))
+ data[j++] = count;
+ }
+ }
+
+ /* Update the DMA HW counters for dwmac10/100 */
+ ret = stmmac_dma_diagnostic_fr(priv, &dev->stats, (void *) &priv->xstats,
+ priv->ioaddr);
+ if (ret) {
+ /* If supported, for new GMAC chips expose the MMC counters */
+ if (priv->dma_cap.rmon) {
+ stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
+
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ char *p;
+ p = (char *)priv + stmmac_mmc[i].stat_offset;
+
+ data[j++] = (stmmac_mmc[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) :
+ (*(u32 *)p);
+ }
+ }
+ if (priv->eee_enabled) {
+ int val = phylink_get_eee_err(priv->phylink);
+ if (val)
+ priv->xstats.phy_eee_wakeup_error_n = val;
+ }
+
+ if (priv->synopsys_id >= DWMAC_CORE_3_50)
+ stmmac_mac_debug(priv, priv->ioaddr,
+ (void *)&priv->xstats,
+ rx_queues_count, tx_queues_count);
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ char *p = (char *)priv + stmmac_gstrings_stats[i].stat_offset;
+ data[j++] = (stmmac_gstrings_stats[i].sizeof_stat ==
+ sizeof(u64)) ? (*(u64 *)p) : (*(u32 *)p);
+ }
+ stmmac_get_per_qstats(priv, &data[j]);
+}
+
+static int stmmac_get_sset_count(struct net_device *netdev, int sset)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ int i, len, safety_len = 0;
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ len = STMMAC_STATS_LEN +
+ STMMAC_TXQ_STATS * tx_cnt +
+ STMMAC_RXQ_STATS * rx_cnt;
+
+ if (priv->dma_cap.rmon)
+ len += STMMAC_MMC_STATS_LEN;
+ if (priv->dma_cap.asp) {
+ for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
+ if (!stmmac_safety_feat_dump(priv,
+ &priv->sstats, i,
+ NULL, NULL))
+ safety_len++;
+ }
+
+ len += safety_len;
+ }
+
+ return len;
+ case ETH_SS_TEST:
+ return stmmac_selftest_get_count(priv);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ int q, stat;
+
+ for (q = 0; q < tx_cnt; q++) {
+ for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) {
+ snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
+ stmmac_qstats_tx_string[stat]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+ for (q = 0; q < rx_cnt; q++) {
+ for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) {
+ snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q,
+ stmmac_qstats_rx_string[stat]);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+}
+
+static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+ int i;
+ u8 *p = data;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ if (priv->dma_cap.asp) {
+ for (i = 0; i < STMMAC_SAFETY_FEAT_SIZE; i++) {
+ const char *desc;
+ if (!stmmac_safety_feat_dump(priv,
+ &priv->sstats, i,
+ NULL, &desc)) {
+ memcpy(p, desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ }
+ }
+ if (priv->dma_cap.rmon)
+ for (i = 0; i < STMMAC_MMC_STATS_LEN; i++) {
+ memcpy(p, stmmac_mmc[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < STMMAC_STATS_LEN; i++) {
+ memcpy(p, stmmac_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ stmmac_get_qstats_string(priv, p);
+ break;
+ case ETH_SS_TEST:
+ stmmac_selftest_get_strings(priv, p);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+/* Currently WoL is only supported via Magic packet and unicast frames. */
+static void stmmac_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->plat->pmt)
+ return phylink_ethtool_get_wol(priv->phylink, wol);
+
+ mutex_lock(&priv->lock);
+ if (device_can_wakeup(priv->device)) {
+ wol->supported = WAKE_MAGIC | WAKE_UCAST;
+ if (priv->hw_cap_support && !priv->dma_cap.pmt_magic_frame)
+ wol->supported &= ~WAKE_MAGIC;
+ wol->wolopts = priv->wolopts;
+ }
+ mutex_unlock(&priv->lock);
+}
+
+static int stmmac_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 support = WAKE_MAGIC | WAKE_UCAST;
+
+ if (!device_can_wakeup(priv->device))
+ return -EOPNOTSUPP;
+
+ if (!priv->plat->pmt) {
+ int ret = phylink_ethtool_set_wol(priv->phylink, wol);
+
+ if (!ret)
+ device_set_wakeup_enable(priv->device, !!wol->wolopts);
+ return ret;
+ }
+
+ /* By default almost all GMAC devices support WoL via
+ * magic frame but we can disable it if the HW capability
+ * register shows no support for pmt_magic_frame.
+ */
+ if ((priv->hw_cap_support) && (!priv->dma_cap.pmt_magic_frame))
+ wol->wolopts &= ~WAKE_MAGIC;
+
+ if (wol->wolopts & ~support)
+ return -EINVAL;
+
+ if (wol->wolopts) {
+ pr_info("stmmac: wakeup enable\n");
+ device_set_wakeup_enable(priv->device, 1);
+ /* Avoid unbalanced enable_irq_wake calls */
+ if (priv->wol_irq_disabled)
+ enable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = false;
+ } else {
+ device_set_wakeup_enable(priv->device, 0);
+ /* Avoid unbalanced disable_irq_wake calls */
+ if (!priv->wol_irq_disabled)
+ disable_irq_wake(priv->wol_irq);
+ priv->wol_irq_disabled = true;
+ }
+
+ mutex_lock(&priv->lock);
+ priv->wolopts = wol->wolopts;
+ mutex_unlock(&priv->lock);
+
+ return 0;
+}
+
+static int stmmac_ethtool_op_get_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ edata->eee_enabled = priv->eee_enabled;
+ edata->eee_active = priv->eee_active;
+ edata->tx_lpi_timer = priv->tx_lpi_timer;
+ edata->tx_lpi_enabled = priv->tx_lpi_enabled;
+
+ return phylink_ethtool_get_eee(priv->phylink, edata);
+}
+
+static int stmmac_ethtool_op_set_eee(struct net_device *dev,
+ struct ethtool_eee *edata)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ if (!priv->dma_cap.eee)
+ return -EOPNOTSUPP;
+
+ if (priv->tx_lpi_enabled != edata->tx_lpi_enabled)
+ netdev_warn(priv->dev,
+ "Setting EEE tx-lpi is not supported\n");
+
+ if (!edata->eee_enabled)
+ stmmac_disable_eee_mode(priv);
+
+ ret = phylink_ethtool_set_eee(priv->phylink, edata);
+ if (ret)
+ return ret;
+
+ if (edata->eee_enabled &&
+ priv->tx_lpi_timer != edata->tx_lpi_timer) {
+ priv->tx_lpi_timer = edata->tx_lpi_timer;
+ stmmac_eee_init(priv);
+ }
+
+ return 0;
+}
+
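+/* The RX interrupt watchdog (RIWT) counts in units of 256 CSR clock
+ * cycles, hence the *256 / /256 in the conversions below. With a 250 MHz
+ * CSR clock, for example, 1024 usec maps to 1000 RIWT units.
+ */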
+static u32 stmmac_usec2riwt(u32 usec, struct stmmac_priv *priv)
+{
+ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
+
+ if (!clk) {
+ clk = priv->plat->clk_ref_rate;
+ if (!clk)
+ return 0;
+ }
+
+ return (usec * (clk / 1000000)) / 256;
+}
+
+static u32 stmmac_riwt2usec(u32 riwt, struct stmmac_priv *priv)
+{
+ unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
+
+ if (!clk) {
+ clk = priv->plat->clk_ref_rate;
+ if (!clk)
+ return 0;
+ }
+
+ return (riwt * 256) / (clk / 1000000);
+}
+
+static int __stmmac_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 max_cnt;
+ u32 rx_cnt;
+ u32 tx_cnt;
+
+ rx_cnt = priv->plat->rx_queues_to_use;
+ tx_cnt = priv->plat->tx_queues_to_use;
+ max_cnt = max(rx_cnt, tx_cnt);
+
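+	/* A negative queue index denotes the legacy, non-per-queue ethtool
+	 * query; report queue 0's settings in that case.
+	 */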
+ if (queue < 0)
+ queue = 0;
+ else if (queue >= max_cnt)
+ return -EINVAL;
+
+ if (queue < tx_cnt) {
+ ec->tx_coalesce_usecs = priv->tx_coal_timer[queue];
+ ec->tx_max_coalesced_frames = priv->tx_coal_frames[queue];
+ } else {
+ ec->tx_coalesce_usecs = 0;
+ ec->tx_max_coalesced_frames = 0;
+ }
+
+ if (priv->use_riwt && queue < rx_cnt) {
+ ec->rx_max_coalesced_frames = priv->rx_coal_frames[queue];
+ ec->rx_coalesce_usecs = stmmac_riwt2usec(priv->rx_riwt[queue],
+ priv);
+ } else {
+ ec->rx_max_coalesced_frames = 0;
+ ec->rx_coalesce_usecs = 0;
+ }
+
+ return 0;
+}
+
+static int stmmac_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return __stmmac_get_coalesce(dev, ec, -1);
+}
+
+static int stmmac_get_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __stmmac_get_coalesce(dev, ec, queue);
+}
+
+static int __stmmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ int queue)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ bool all_queues = false;
+ unsigned int rx_riwt;
+ u32 max_cnt;
+ u32 rx_cnt;
+ u32 tx_cnt;
+
+ rx_cnt = priv->plat->rx_queues_to_use;
+ tx_cnt = priv->plat->tx_queues_to_use;
+ max_cnt = max(rx_cnt, tx_cnt);
+
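+	/* A negative queue index comes from the legacy ethtool call and
+	 * applies the new settings to every queue.
+	 */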
+ if (queue < 0)
+ all_queues = true;
+ else if (queue >= max_cnt)
+ return -EINVAL;
+
+ if (priv->use_riwt && (ec->rx_coalesce_usecs > 0)) {
+ rx_riwt = stmmac_usec2riwt(ec->rx_coalesce_usecs, priv);
+
+ if ((rx_riwt > MAX_DMA_RIWT) || (rx_riwt < MIN_DMA_RIWT))
+ return -EINVAL;
+
+ if (all_queues) {
+ int i;
+
+ for (i = 0; i < rx_cnt; i++) {
+ priv->rx_riwt[i] = rx_riwt;
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ rx_riwt, i);
+ priv->rx_coal_frames[i] =
+ ec->rx_max_coalesced_frames;
+ }
+ } else if (queue < rx_cnt) {
+ priv->rx_riwt[queue] = rx_riwt;
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ rx_riwt, queue);
+ priv->rx_coal_frames[queue] =
+ ec->rx_max_coalesced_frames;
+ }
+ }
+
+ if ((ec->tx_coalesce_usecs == 0) &&
+ (ec->tx_max_coalesced_frames == 0))
+ return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs > STMMAC_MAX_COAL_TX_TICK) ||
+ (ec->tx_max_coalesced_frames > STMMAC_TX_MAX_FRAMES))
+ return -EINVAL;
+
+ if (all_queues) {
+ int i;
+
+ for (i = 0; i < tx_cnt; i++) {
+ priv->tx_coal_frames[i] =
+ ec->tx_max_coalesced_frames;
+ priv->tx_coal_timer[i] =
+ ec->tx_coalesce_usecs;
+ }
+ } else if (queue < tx_cnt) {
+ priv->tx_coal_frames[queue] =
+ ec->tx_max_coalesced_frames;
+ priv->tx_coal_timer[queue] =
+ ec->tx_coalesce_usecs;
+ }
+
+ return 0;
+}
+
+static int stmmac_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ return __stmmac_set_coalesce(dev, ec, -1);
+}
+
+static int stmmac_set_per_queue_coalesce(struct net_device *dev, u32 queue,
+ struct ethtool_coalesce *ec)
+{
+ return __stmmac_set_coalesce(dev, ec, queue);
+}
+
+static int stmmac_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (rxnfc->cmd) {
+ case ETHTOOL_GRXRINGS:
+ rxnfc->data = priv->plat->rx_queues_to_use;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static u32 stmmac_get_rxfh_key_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return sizeof(priv->rss.key);
+}
+
+static u32 stmmac_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ return ARRAY_SIZE(priv->rss.table);
+}
+
+static int stmmac_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ indir[i] = priv->rss.table[i];
+ }
+
+ if (key)
+ memcpy(key, priv->rss.key, sizeof(priv->rss.key));
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int stmmac_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && (hfunc != ETH_RSS_HASH_TOP))
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = indir[i];
+ }
+
+ if (key)
+ memcpy(priv->rss.key, key, sizeof(priv->rss.key));
+
+ return stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
+static void stmmac_get_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ chan->rx_count = priv->plat->rx_queues_to_use;
+ chan->tx_count = priv->plat->tx_queues_to_use;
+ chan->max_rx = priv->dma_cap.number_rx_queues;
+ chan->max_tx = priv->dma_cap.number_tx_queues;
+}
+
+static int stmmac_set_channels(struct net_device *dev,
+ struct ethtool_channels *chan)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (chan->rx_count > priv->dma_cap.number_rx_queues ||
+ chan->tx_count > priv->dma_cap.number_tx_queues ||
+ !chan->rx_count || !chan->tx_count)
+ return -EINVAL;
+
+ return stmmac_reinit_queues(dev, chan->rx_count, chan->tx_count);
+}
+
+static int stmmac_get_ts_info(struct net_device *dev,
+ struct ethtool_ts_info *info)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+	if (priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+
+ if (priv->ptp_clock)
+ info->phc_index = ptp_clock_index(priv->ptp_clock);
+
+ info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+
+ info->rx_filters = ((1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+ (1 << HWTSTAMP_FILTER_ALL));
+ return 0;
+	}
+
+	return ethtool_op_get_ts_info(dev, info);
+}
+
+static int stmmac_get_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna, void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ *(u32 *)data = priv->rx_copybreak;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int stmmac_set_tunable(struct net_device *dev,
+ const struct ethtool_tunable *tuna,
+ const void *data)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ switch (tuna->id) {
+ case ETHTOOL_RX_COPYBREAK:
+ priv->rx_copybreak = *(u32 *)data;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static const struct ethtool_ops stmmac_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+ ETHTOOL_COALESCE_MAX_FRAMES,
+ .begin = stmmac_check_if_running,
+ .get_drvinfo = stmmac_ethtool_getdrvinfo,
+ .get_msglevel = stmmac_ethtool_getmsglevel,
+ .set_msglevel = stmmac_ethtool_setmsglevel,
+ .get_regs = stmmac_ethtool_gregs,
+ .get_regs_len = stmmac_ethtool_get_regs_len,
+ .get_link = ethtool_op_get_link,
+ .nway_reset = stmmac_nway_reset,
+ .get_ringparam = stmmac_get_ringparam,
+ .set_ringparam = stmmac_set_ringparam,
+ .get_pauseparam = stmmac_get_pauseparam,
+ .set_pauseparam = stmmac_set_pauseparam,
+ .self_test = stmmac_selftest_run,
+ .get_ethtool_stats = stmmac_get_ethtool_stats,
+ .get_strings = stmmac_get_strings,
+ .get_wol = stmmac_get_wol,
+ .set_wol = stmmac_set_wol,
+ .get_eee = stmmac_ethtool_op_get_eee,
+ .set_eee = stmmac_ethtool_op_set_eee,
+ .get_sset_count = stmmac_get_sset_count,
+ .get_rxnfc = stmmac_get_rxnfc,
+ .get_rxfh_key_size = stmmac_get_rxfh_key_size,
+ .get_rxfh_indir_size = stmmac_get_rxfh_indir_size,
+ .get_rxfh = stmmac_get_rxfh,
+ .set_rxfh = stmmac_set_rxfh,
+ .get_ts_info = stmmac_get_ts_info,
+ .get_coalesce = stmmac_get_coalesce,
+ .set_coalesce = stmmac_set_coalesce,
+ .get_per_queue_coalesce = stmmac_get_per_queue_coalesce,
+ .set_per_queue_coalesce = stmmac_set_per_queue_coalesce,
+ .get_channels = stmmac_get_channels,
+ .set_channels = stmmac_set_channels,
+ .get_tunable = stmmac_get_tunable,
+ .set_tunable = stmmac_set_tunable,
+ .get_link_ksettings = stmmac_ethtool_get_link_ksettings,
+ .set_link_ksettings = stmmac_ethtool_set_link_ksettings,
+};
+
+void stmmac_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &stmmac_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
new file mode 100644
index 000000000..8b50f0305
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_hwtstamp.c
@@ -0,0 +1,224 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+  This implements all the API for managing HW timestamping & PTP.
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/ptp_clock_kernel.h>
+#include "common.h"
+#include "stmmac_ptp.h"
+#include "dwmac4.h"
+#include "stmmac.h"
+
+static void config_hw_tstamping(void __iomem *ioaddr, u32 data)
+{
+ writel(data, ioaddr + PTP_TCR);
+}
+
+static void config_sub_second_increment(void __iomem *ioaddr,
+ u32 ptp_clock, int gmac4, u32 *ssinc)
+{
+ u32 value = readl(ioaddr + PTP_TCR);
+ unsigned long data;
+ u32 reg_value;
+
+	/* For GMAC 3.x and 4.x, in "fine adjustment mode" set the sub-second
+	 * increment to twice the number of nanoseconds of a clock cycle.
+	 * The caller's calculation of the default_addend value will set it
+	 * to mid-range = 2^31 when the remainder of this division is zero,
+	 * which makes the accumulator overflow once every 2 ptp_clock
+	 * cycles, adding twice the number of nanoseconds of a clock cycle:
+	 * 2000000000ULL / ptp_clock.
+	 */
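+	/* Illustrative example (assuming a 50 MHz ptp_clock): fine update
+	 * mode yields 2000000000 / 50000000 = 40 ns per increment and
+	 * coarse mode 20 ns; with TSCTRLSSR clear the value is rescaled
+	 * to ~0.465 ns units, i.e. 40 * 1000 / 465 = 86.
+	 */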
+ if (value & PTP_TCR_TSCFUPDT)
+ data = (2000000000ULL / ptp_clock);
+ else
+ data = (1000000000ULL / ptp_clock);
+
+ /* 0.465ns accuracy */
+ if (!(value & PTP_TCR_TSCTRLSSR))
+ data = (data * 1000) / 465;
+
+ if (data > PTP_SSIR_SSINC_MAX)
+ data = PTP_SSIR_SSINC_MAX;
+
+ reg_value = data;
+ if (gmac4)
+ reg_value <<= GMAC4_PTP_SSIR_SSINC_SHIFT;
+
+ writel(reg_value, ioaddr + PTP_SSIR);
+
+ if (ssinc)
+ *ssinc = data;
+}
+
+static int init_systime(void __iomem *ioaddr, u32 sec, u32 nsec)
+{
+ u32 value;
+
+ writel(sec, ioaddr + PTP_STSUR);
+ writel(nsec, ioaddr + PTP_STNSUR);
+ /* issue command to initialize the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSINIT;
+ writel(value, ioaddr + PTP_TCR);
+
+	/* wait for the present system time initialization to complete */
+ return readl_poll_timeout_atomic(ioaddr + PTP_TCR, value,
+ !(value & PTP_TCR_TSINIT),
+ 10, 100000);
+}
+
+static int config_addend(void __iomem *ioaddr, u32 addend)
+{
+ u32 value;
+ int limit;
+
+ writel(addend, ioaddr + PTP_TAR);
+ /* issue command to update the addend value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSADDREG;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present addend update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSADDREG))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int adjust_systime(void __iomem *ioaddr, u32 sec, u32 nsec,
+ int add_sub, int gmac4)
+{
+ u32 value;
+ int limit;
+
+ if (add_sub) {
+		/* If the new sec value needs to be subtracted from
+		 * the system time, then the MAC_STSUR reg should be
+		 * programmed with (2^32 - <new_sec_value>)
+ */
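+		/* e.g. subtracting 3 s programs 0xFFFFFFFD (2^32 - 3),
+		 * which the u32 negation below produces directly.
+		 */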
+ if (gmac4)
+ sec = -sec;
+
+ value = readl(ioaddr + PTP_TCR);
+ if (value & PTP_TCR_TSCTRLSSR)
+ nsec = (PTP_DIGITAL_ROLLOVER_MODE - nsec);
+ else
+ nsec = (PTP_BINARY_ROLLOVER_MODE - nsec);
+ }
+
+ writel(sec, ioaddr + PTP_STSUR);
+ value = (add_sub << PTP_STNSUR_ADDSUB_SHIFT) | nsec;
+ writel(value, ioaddr + PTP_STNSUR);
+
+	/* issue command to update the system time value */
+ value = readl(ioaddr + PTP_TCR);
+ value |= PTP_TCR_TSUPDT;
+ writel(value, ioaddr + PTP_TCR);
+
+ /* wait for present system time adjust/update to complete */
+ limit = 10;
+ while (limit--) {
+ if (!(readl(ioaddr + PTP_TCR) & PTP_TCR_TSUPDT))
+ break;
+ mdelay(10);
+ }
+ if (limit < 0)
+ return -EBUSY;
+
+ return 0;
+}
+
+static void get_systime(void __iomem *ioaddr, u64 *systime)
+{
+ u64 ns, sec0, sec1;
+
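+	/* Re-read the seconds register until two reads agree, so that the
+	 * nanoseconds value cannot be torn by a seconds rollover between
+	 * the two reads.
+	 */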
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ do {
+ sec0 = sec1;
+ /* Get the TSSS value */
+ ns = readl_relaxed(ioaddr + PTP_STNSR);
+ /* Get the TSS value */
+ sec1 = readl_relaxed(ioaddr + PTP_STSR);
+ } while (sec0 != sec1);
+
+ if (systime)
+ *systime = ns + (sec1 * 1000000000ULL);
+}
+
+static void get_ptptime(void __iomem *ptpaddr, u64 *ptp_time)
+{
+ u64 ns;
+
+ ns = readl(ptpaddr + PTP_ATNR);
+ ns += readl(ptpaddr + PTP_ATSR) * NSEC_PER_SEC;
+
+ *ptp_time = ns;
+}
+
+static void timestamp_interrupt(struct stmmac_priv *priv)
+{
+ u32 num_snapshot, ts_status, tsync_int;
+ struct ptp_clock_event event;
+ unsigned long flags;
+ u64 ptp_time;
+ int i;
+
+ if (priv->plat->int_snapshot_en) {
+ wake_up(&priv->tstamp_busy_wait);
+ return;
+ }
+
+ tsync_int = readl(priv->ioaddr + GMAC_INT_STATUS) & GMAC_INT_TSIE;
+
+ if (!tsync_int)
+ return;
+
+ /* Read timestamp status to clear interrupt from either external
+ * timestamp or start/end of PPS.
+ */
+ ts_status = readl(priv->ioaddr + GMAC_TIMESTAMP_STATUS);
+
+ if (!priv->plat->ext_snapshot_en)
+ return;
+
+ num_snapshot = (ts_status & GMAC_TIMESTAMP_ATSNS_MASK) >>
+ GMAC_TIMESTAMP_ATSNS_SHIFT;
+
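+	/* Drain every snapshot in the FIFO, delivering each one to the
+	 * PTP core as an external-timestamp event.
+	 */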
+ for (i = 0; i < num_snapshot; i++) {
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ get_ptptime(priv->ptpaddr, &ptp_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+ event.type = PTP_CLOCK_EXTTS;
+ event.index = 0;
+ event.timestamp = ptp_time;
+ ptp_clock_event(priv->ptp_clock, &event);
+ }
+}
+
+const struct stmmac_hwtimestamp stmmac_ptp = {
+ .config_hw_tstamping = config_hw_tstamping,
+ .init_systime = init_systime,
+ .config_sub_second_increment = config_sub_second_increment,
+ .config_addend = config_addend,
+ .adjust_systime = adjust_systime,
+ .get_systime = get_systime,
+ .get_ptptime = get_ptptime,
+ .timestamp_interrupt = timestamp_interrupt,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
new file mode 100644
index 000000000..e988a60c8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -0,0 +1,7679 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
+ ST Ethernet IPs are built around a Synopsys IP Core.
+
+ Copyright(C) 2007-2011 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+
+ Documentation available at:
+ http://www.stlinux.com
+ Support available at:
+ https://bugzilla.stlinux.com/
+*******************************************************************************/
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/crc32.h>
+#include <linux/mii.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/dma-mapping.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/prefetch.h>
+#include <linux/pinctrl/consumer.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#endif /* CONFIG_DEBUG_FS */
+#include <linux/net_tstamp.h>
+#include <linux/phylink.h>
+#include <linux/udp.h>
+#include <linux/bpf_trace.h>
+#include <net/pkt_cls.h>
+#include <net/xdp_sock_drv.h>
+#include "stmmac_ptp.h"
+#include "stmmac.h"
+#include "stmmac_xdp.h"
+#include <linux/reset.h>
+#include <linux/of_mdio.h>
+#include "dwmac1000.h"
+#include "dwxgmac2.h"
+#include "hwif.h"
+
+/* As long as the interface is active, we keep the timestamping counter enabled
+ * with fine resolution and binary rollover. This avoids non-monotonic behavior
+ * (clock jumps) when changing timestamping settings at runtime.
+ */
+#define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
+ PTP_TCR_TSCTRLSSR)
+
+#define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
+#define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
+
+/* Module parameters */
+#define TX_TIMEO 5000
+static int watchdog = TX_TIMEO;
+module_param(watchdog, int, 0644);
+MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
+
+static int debug = -1;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
+
+static int phyaddr = -1;
+module_param(phyaddr, int, 0444);
+MODULE_PARM_DESC(phyaddr, "Physical device address");
+
+#define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
+#define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
+
+/* Limit to make sure XDP TX and slow path can coexist */
+#define STMMAC_XSK_TX_BUDGET_MAX 256
+#define STMMAC_TX_XSK_AVAIL 16
+#define STMMAC_RX_FILL_BATCH 16
+
+#define STMMAC_XDP_PASS 0
+#define STMMAC_XDP_CONSUMED BIT(0)
+#define STMMAC_XDP_TX BIT(1)
+#define STMMAC_XDP_REDIRECT BIT(2)
+
+static int flow_ctrl = FLOW_AUTO;
+module_param(flow_ctrl, int, 0644);
+MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
+
+static int pause = PAUSE_TIME;
+module_param(pause, int, 0644);
+MODULE_PARM_DESC(pause, "Flow Control Pause Time");
+
+#define TC_DEFAULT 64
+static int tc = TC_DEFAULT;
+module_param(tc, int, 0644);
+MODULE_PARM_DESC(tc, "DMA threshold control value");
+
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
+module_param(buf_sz, int, 0644);
+MODULE_PARM_DESC(buf_sz, "DMA buffer size");
+
+#define STMMAC_RX_COPYBREAK 256
+
+static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
+
+#define STMMAC_DEFAULT_LPI_TIMER 1000
+static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+module_param(eee_timer, int, 0644);
+MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
+#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
+
+/* By default the driver uses ring mode to manage tx and rx descriptors,
+ * but allows the user to force use of chain mode instead.
+ */
+static unsigned int chain_mode;
+module_param(chain_mode, int, 0444);
+MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
+
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
+/* For MSI interrupts handling */
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
+static void stmmac_reset_queues_param(struct stmmac_priv *priv);
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan);
+
+#ifdef CONFIG_DEBUG_FS
+static const struct net_device_ops stmmac_netdev_ops;
+static void stmmac_init_fs(struct net_device *dev);
+static void stmmac_exit_fs(struct net_device *dev);
+#endif
+
+#define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
+
+int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
+{
+ int ret = 0;
+
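+	/* On an enable failure, clocks that were already enabled are
+	 * disabled again so the prepare/enable counts stay balanced.
+	 */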
+ if (enabled) {
+ ret = clk_prepare_enable(priv->plat->stmmac_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(priv->plat->pclk);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ return ret;
+ }
+ if (priv->plat->clks_config) {
+ ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ if (ret) {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ return ret;
+ }
+ }
+ } else {
+ clk_disable_unprepare(priv->plat->stmmac_clk);
+ clk_disable_unprepare(priv->plat->pclk);
+ if (priv->plat->clks_config)
+ priv->plat->clks_config(priv->plat->bsp_priv, enabled);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
+
+/**
+ * stmmac_verify_args - verify the driver parameters.
+ * Description: it checks the driver parameters and set a default in case of
+ * errors.
+ */
+static void stmmac_verify_args(void)
+{
+ if (unlikely(watchdog < 0))
+ watchdog = TX_TIMEO;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
+ if (unlikely(flow_ctrl > 1))
+ flow_ctrl = FLOW_AUTO;
+ else if (likely(flow_ctrl < 0))
+ flow_ctrl = FLOW_OFF;
+ if (unlikely((pause < 0) || (pause > 0xffff)))
+ pause = PAUSE_TIME;
+ if (eee_timer < 0)
+ eee_timer = STMMAC_DEFAULT_LPI_TIMER;
+}
+
+static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+ u32 queue;
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_disable(&ch->rxtx_napi);
+ continue;
+ }
+
+ if (queue < rx_queues_cnt)
+ napi_disable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_disable(&ch->tx_napi);
+ }
+}
+
+/**
+ * stmmac_disable_all_queues - Disable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_disable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_rx_queue *rx_q;
+ u32 queue;
+
+ /* synchronize_rcu() needed for pending XDP buffers to drain */
+ for (queue = 0; queue < rx_queues_cnt; queue++) {
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ if (rx_q->xsk_pool) {
+ synchronize_rcu();
+ break;
+ }
+ }
+
+ __stmmac_disable_all_queues(priv);
+}
+
+/**
+ * stmmac_enable_all_queues - Enable all queues
+ * @priv: driver private structure
+ */
+static void stmmac_enable_all_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
+ u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
+ u32 queue;
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps)) {
+ napi_enable(&ch->rxtx_napi);
+ continue;
+ }
+
+ if (queue < rx_queues_cnt)
+ napi_enable(&ch->rx_napi);
+ if (queue < tx_queues_cnt)
+ napi_enable(&ch->tx_napi);
+ }
+}
+
+static void stmmac_service_event_schedule(struct stmmac_priv *priv)
+{
+ if (!test_bit(STMMAC_DOWN, &priv->state) &&
+ !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
+ queue_work(priv->wq, &priv->service_task);
+}
+
+static void stmmac_global_err(struct stmmac_priv *priv)
+{
+ netif_carrier_off(priv->dev);
+ set_bit(STMMAC_RESET_REQUESTED, &priv->state);
+ stmmac_service_event_schedule(priv);
+}
+
+/**
+ * stmmac_clk_csr_set - dynamically set the MDC clock
+ * @priv: driver private structure
+ * Description: this is to dynamically set the MDC clock according to the CSR
+ * clock input.
+ * Note:
+ *	If a specific clk_csr value is passed from the platform,
+ *	the CSR Clock Range selection cannot be changed at run-time
+ *	and is fixed (as reported in the driver documentation).
+ *	Otherwise the driver will try to set the MDC clock dynamically
+ *	according to the actual clock input.
+ */
+static void stmmac_clk_csr_set(struct stmmac_priv *priv)
+{
+ u32 clk_rate;
+
+ clk_rate = clk_get_rate(priv->plat->stmmac_clk);
+
+	/* The platform-provided default clk_csr is assumed valid in all
+	 * cases except the ones handled below. For rates higher than the
+	 * IEEE 802.3 specified frequency we cannot estimate the proper
+	 * divider, because the frequency of clk_csr_i is unknown, so the
+	 * default divider is left unchanged.
+	 */
+ if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
+ if (clk_rate < CSR_F_35M)
+ priv->clk_csr = STMMAC_CSR_20_35M;
+ else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
+ priv->clk_csr = STMMAC_CSR_35_60M;
+ else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
+ priv->clk_csr = STMMAC_CSR_60_100M;
+ else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
+ priv->clk_csr = STMMAC_CSR_100_150M;
+ else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
+ priv->clk_csr = STMMAC_CSR_150_250M;
+ else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
+ priv->clk_csr = STMMAC_CSR_250_300M;
+ }
+
+ if (priv->plat->has_sun8i) {
+ if (clk_rate > 160000000)
+ priv->clk_csr = 0x03;
+ else if (clk_rate > 80000000)
+ priv->clk_csr = 0x02;
+ else if (clk_rate > 40000000)
+ priv->clk_csr = 0x01;
+ else
+ priv->clk_csr = 0;
+ }
+
+ if (priv->plat->has_xgmac) {
+ if (clk_rate > 400000000)
+ priv->clk_csr = 0x5;
+ else if (clk_rate > 350000000)
+ priv->clk_csr = 0x4;
+ else if (clk_rate > 300000000)
+ priv->clk_csr = 0x3;
+ else if (clk_rate > 250000000)
+ priv->clk_csr = 0x2;
+ else if (clk_rate > 150000000)
+ priv->clk_csr = 0x1;
+ else
+ priv->clk_csr = 0x0;
+ }
+}
+
+static void print_pkt(unsigned char *buf, int len)
+{
+ pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
+ print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
+}
+
+static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 avail;
+
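+	/* One slot is always left unused so a full ring can be told apart
+	 * from an empty one; e.g. with a 512-entry ring, dirty_tx = 10 and
+	 * cur_tx = 500 leave 512 - 500 + 10 - 1 = 21 free slots.
+	 */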
+ if (tx_q->dirty_tx > tx_q->cur_tx)
+ avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
+ else
+ avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
+
+ return avail;
+}
+
+/**
+ * stmmac_rx_dirty - Get RX queue dirty
+ * @priv: driver private structure
+ * @queue: RX queue index
+ */
+static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ u32 dirty;
+
+ if (rx_q->dirty_rx <= rx_q->cur_rx)
+ dirty = rx_q->cur_rx - rx_q->dirty_rx;
+ else
+ dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
+
+ return dirty;
+}
+
+static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
+{
+ int tx_lpi_timer;
+
+ /* Clear/set the SW EEE timer flag based on LPI ET enablement */
+ priv->eee_sw_timer_en = en ? 0 : 1;
+ tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
+ stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
+}
+
+/**
+ * stmmac_enable_eee_mode - check and enter LPI mode
+ * @priv: driver private structure
+ * Description: this function verifies that all TX queues are idle and,
+ * if so, enters LPI mode when EEE is in use.
+ */
+static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* check if all TX queues have the work finished */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ return -EBUSY; /* still unfinished work */
+ }
+
+ /* Check and enter in LPI mode */
+ if (!priv->tx_path_in_lpi_mode)
+ stmmac_set_eee_mode(priv, priv->hw,
+ priv->plat->en_tx_lpi_clockgating);
+ return 0;
+}
+
+/**
+ * stmmac_disable_eee_mode - disable and exit from LPI mode
+ * @priv: driver private structure
+ * Description: this function exits the LPI state and disables EEE. It is
+ * called from the xmit path.
+ */
+void stmmac_disable_eee_mode(struct stmmac_priv *priv)
+{
+ if (!priv->eee_sw_timer_en) {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ return;
+ }
+
+ stmmac_reset_eee_mode(priv, priv->hw);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+}
+
+/**
+ * stmmac_eee_ctrl_timer - EEE TX SW timer.
+ * @t: timer_list struct containing private info
+ * Description:
+ * if there is no data transfer and we are not in LPI state,
+ * then the MAC transmitter can be moved to LPI state.
+ */
+static void stmmac_eee_ctrl_timer(struct timer_list *t)
+{
+ struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
+
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+}
+
+/**
+ * stmmac_eee_init - init EEE
+ * @priv: driver private structure
+ * Description:
+ * if the GMAC supports EEE (from the HW cap reg) and the phy device
+ * can also manage EEE, this function enables the LPI state and starts
+ * the related timer.
+ */
+bool stmmac_eee_init(struct stmmac_priv *priv)
+{
+ int eee_tw_timer = priv->eee_tw_timer;
+
+	/* Using PCS we cannot deal with the phy registers at this stage,
+	 * so we do not support extra features like EEE.
+	 */
+ if (priv->hw->pcs == STMMAC_PCS_TBI ||
+ priv->hw->pcs == STMMAC_PCS_RTBI)
+ return false;
+
+ /* Check if MAC core supports the EEE feature. */
+ if (!priv->dma_cap.eee)
+ return false;
+
+ mutex_lock(&priv->lock);
+
+ /* Check if it needs to be deactivated */
+ if (!priv->eee_active) {
+ if (priv->eee_enabled) {
+ netdev_dbg(priv->dev, "disable EEE\n");
+ stmmac_lpi_entry_timer_config(priv, 0);
+ del_timer_sync(&priv->eee_ctrl_timer);
+ stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ false);
+ }
+ mutex_unlock(&priv->lock);
+ return false;
+ }
+
+ if (priv->eee_active && !priv->eee_enabled) {
+ timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
+ stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
+ eee_tw_timer);
+ if (priv->hw->xpcs)
+ xpcs_config_eee(priv->hw->xpcs,
+ priv->plat->mult_fact_100ns,
+ true);
+ }
+
+ if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->tx_path_in_lpi_mode = false;
+ stmmac_lpi_entry_timer_config(priv, 1);
+ } else {
+ stmmac_lpi_entry_timer_config(priv, 0);
+ mod_timer(&priv->eee_ctrl_timer,
+ STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
+
+ mutex_unlock(&priv->lock);
+ netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
+ return true;
+}
+
+/* stmmac_get_tx_hwtstamp - get HW TX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the timestamp from the descriptor, performs some
+ * sanity checks, and passes it to the stack.
+ */
+static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
+ struct dma_desc *p, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps shhwtstamp;
+ bool found = false;
+ u64 ns = 0;
+
+ if (!priv->hwts_tx_en)
+ return;
+
+ /* exit if skb doesn't support hw tstamp */
+ if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
+ return;
+
+ /* check tx tstamp status */
+ if (stmmac_get_tx_timestamp_status(priv, p)) {
+ stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
+ found = true;
+ } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
+ found = true;
+ }
+
+ if (found) {
+ ns -= priv->plat->cdc_error_adj;
+
+ memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp.hwtstamp = ns_to_ktime(ns);
+
+ netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
+ /* pass tstamp to stack */
+ skb_tstamp_tx(skb, &shhwtstamp);
+ }
+}
+
+/* stmmac_get_rx_hwtstamp - get HW RX timestamps
+ * @priv: driver private structure
+ * @p : descriptor pointer
+ * @np : next descriptor pointer
+ * @skb : the socket buffer
+ * Description :
+ * This function reads the received packet's timestamp from the descriptor
+ * and passes it to the stack. It also performs some sanity checks.
+ */
+static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
+ struct dma_desc *np, struct sk_buff *skb)
+{
+ struct skb_shared_hwtstamps *shhwtstamp = NULL;
+ struct dma_desc *desc = p;
+ u64 ns = 0;
+
+ if (!priv->hwts_rx_en)
+ return;
+ /* For GMAC4, the valid timestamp is from CTX next desc. */
+ if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
+ desc = np;
+
+ /* Check if timestamp is available */
+ if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
+ stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
+
+ ns -= priv->plat->cdc_error_adj;
+
+ netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
+ shhwtstamp = skb_hwtstamps(skb);
+ memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
+ shhwtstamp->hwtstamp = ns_to_ktime(ns);
+ } else {
+ netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
+ }
+}
+
+/**
+ * stmmac_hwtstamp_set - control hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An ioctl-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function configures the MAC to enable/disable both outgoing (TX)
+ * and incoming (RX) packet timestamping based on user input.
+ * Return Value:
+ * 0 on success and a negative errno on failure.
+ */
+static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config config;
+ u32 ptp_v2 = 0;
+ u32 tstamp_all = 0;
+ u32 ptp_over_ipv4_udp = 0;
+ u32 ptp_over_ipv6_udp = 0;
+ u32 ptp_over_ethernet = 0;
+ u32 snap_type_sel = 0;
+ u32 ts_master_en = 0;
+ u32 ts_event_en = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
+ netdev_alert(priv->dev, "No support for HW time stamping\n");
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return -EOPNOTSUPP;
+ }
+
+ if (copy_from_user(&config, ifr->ifr_data,
+ sizeof(config)))
+ return -EFAULT;
+
+ netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
+ __func__, config.flags, config.tx_type, config.rx_filter);
+
+ if (config.tx_type != HWTSTAMP_TX_OFF &&
+ config.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ if (priv->adv_ts) {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+			/* do not time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+			/* 'xmac' hardware can support Sync, Pdelay_Req and
+			 * Pdelay_resp by setting bit14 and bits17/16 to 01.
+			 * This leaves Delay_Req timestamps out.
+			 * Enable all events *and* general purpose message
+			 * timestamping.
+			 */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ /* PTP v1, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ /* PTP v1, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ /* PTP v2, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for all event messages */
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ /* PTP v2, UDP, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ /* PTP v2, UDP, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ /* PTP v2/802.AS1 any layer, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
+ if (priv->synopsys_id < DWMAC_CORE_4_10)
+ ts_event_en = PTP_TCR_TSEVNTENA;
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ /* PTP v2/802.AS1, any layer, Sync packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for SYNC messages only */
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ /* PTP v2/802.AS1, any layer, Delay_req packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
+ ptp_v2 = PTP_TCR_TSVER2ENA;
+ /* take time stamp for Delay_Req messages only */
+ ts_master_en = PTP_TCR_TSMSTRENA;
+ ts_event_en = PTP_TCR_TSEVNTENA;
+
+ ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
+ ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
+ ptp_over_ethernet = PTP_TCR_TSIPENA;
+ break;
+
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_ALL:
+ /* time stamp any incoming packet */
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ tstamp_all = PTP_TCR_TSENALL;
+ break;
+
+ default:
+ return -ERANGE;
+ }
+ } else {
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ config.rx_filter = HWTSTAMP_FILTER_NONE;
+ break;
+ default:
+ /* PTP v1, UDP, any kind of event packet */
+ config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ }
+ }
+ priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
+ priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
+
+ priv->systime_flags = STMMAC_HWTS_ACTIVE;
+
+ if (priv->hwts_tx_en || priv->hwts_rx_en) {
+ priv->systime_flags |= tstamp_all | ptp_v2 |
+ ptp_over_ethernet | ptp_over_ipv6_udp |
+ ptp_over_ipv4_udp | ts_event_en |
+ ts_master_en | snap_type_sel;
+ }
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
+
+ memcpy(&priv->tstamp_config, &config, sizeof(config));
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_hwtstamp_get - read hardware timestamping.
+ * @dev: device pointer.
+ * @ifr: An ioctl-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * Description:
+ * This function obtains the current hardware timestamping settings
+ * as requested.
+ */
+static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config *config = &priv->tstamp_config;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
+/**
+ * stmmac_init_tstamp_counter - init hardware timestamping counter
+ * @priv: driver private structure
+ * @systime_flags: timestamping flags
+ * Description:
+ * Initialize hardware counter for packet timestamping.
+ * This is valid as long as the interface is open and not suspended.
+ * Will be rerun after resuming from suspend, in which case the timestamping
+ * flags updated by stmmac_hwtstamp_set() also need to be restored.
+ */
+int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ struct timespec64 now;
+ u32 sec_inc = 0;
+ u64 temp = 0;
+
+ if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
+ return -EOPNOTSUPP;
+
+ stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
+ priv->systime_flags = systime_flags;
+
+ /* program Sub Second Increment reg */
+ stmmac_config_sub_second_increment(priv, priv->ptpaddr,
+ priv->plat->clk_ptp_rate,
+ xmac, &sec_inc);
+ temp = div_u64(1000000000ULL, sec_inc);
+
+ /* Store sub second increment for later use */
+ priv->sub_second_inc = sec_inc;
+
+	/* calculate the default addend value:
+	 * the formula is:
+	 * addend = (2^32)/freq_div_ratio;
+	 * where freq_div_ratio = 1e9ns/sec_inc
+ */
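+	/* Illustrative example (assuming clk_ptp_rate = 50 MHz in fine
+	 * update mode): sec_inc = 40 ns, so freq_div_ratio = 25 MHz and
+	 * addend = 2^32 * 25000000 / 50000000 = 0x80000000 (mid-range).
+	 */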
+ temp = (u64)(temp << 32);
+ priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
+ stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
+
+ /* initialize system time */
+ ktime_get_real_ts64(&now);
+
+ /* lower 32 bits of tv_sec are safe until y2106 */
+ stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
+
+/**
+ * stmmac_init_ptp - init PTP
+ * @priv: driver private structure
+ * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
+ * This is done by looking at the HW cap. register.
+ * This function also registers the ptp driver.
+ */
+static int stmmac_init_ptp(struct stmmac_priv *priv)
+{
+ bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ int ret;
+
+ if (priv->plat->ptp_clk_freq_config)
+ priv->plat->ptp_clk_freq_config(priv);
+
+ ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
+ if (ret)
+ return ret;
+
+ priv->adv_ts = 0;
+ /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
+ if (xmac && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+ /* Dwmac 3.x core with extend_desc can support adv_ts */
+ else if (priv->extend_desc && priv->dma_cap.atime_stamp)
+ priv->adv_ts = 1;
+
+ if (priv->dma_cap.time_stamp)
+ netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
+
+ if (priv->adv_ts)
+ netdev_info(priv->dev,
+ "IEEE 1588-2008 Advanced Timestamp supported\n");
+
+ priv->hwts_tx_en = 0;
+ priv->hwts_rx_en = 0;
+
+ return 0;
+}
+
+static void stmmac_release_ptp(struct stmmac_priv *priv)
+{
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+ stmmac_ptp_unregister(priv);
+}
+
+/**
+ * stmmac_mac_flow_ctrl - Configure flow control in all queues
+ * @priv: driver private structure
+ * @duplex: duplex passed to the next function
+ * Description: It is used for configuring the flow control in all queues
+ */
+static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+
+ stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
+ priv->pause, tx_cnt);
+}
+
+static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
+ phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ if (!priv->hw->xpcs)
+ return NULL;
+
+ return &priv->hw->xpcs->pcs;
+}
+
+static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ /* Nothing to do, xpcs_config() handles everything */
+}
+
+static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (is_up && *hs_enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
+ MPACKET_VERIFY);
+ } else {
+ *lo_state = FPE_STATE_OFF;
+ *lp_state = FPE_STATE_OFF;
+ }
+}
+
+static void stmmac_mac_link_down(struct phylink_config *config,
+ unsigned int mode, phy_interface_t interface)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ priv->eee_active = false;
+ priv->tx_lpi_enabled = false;
+ priv->eee_enabled = stmmac_eee_init(priv);
+ stmmac_set_eee_pls(priv, priv->hw, false);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, false);
+}
+
+static void stmmac_mac_link_up(struct phylink_config *config,
+ struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface,
+ int speed, int duplex,
+ bool tx_pause, bool rx_pause)
+{
+ struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
+ u32 old_ctrl, ctrl;
+
+ old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
+ ctrl = old_ctrl & ~priv->hw->link.speed_mask;
+
+ if (interface == PHY_INTERFACE_MODE_USXGMII) {
+ switch (speed) {
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_5000:
+ ctrl |= priv->hw->link.xgmii.speed5000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.xgmii.speed2500;
+ break;
+ default:
+ return;
+ }
+ } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
+ switch (speed) {
+ case SPEED_100000:
+ ctrl |= priv->hw->link.xlgmii.speed100000;
+ break;
+ case SPEED_50000:
+ ctrl |= priv->hw->link.xlgmii.speed50000;
+ break;
+ case SPEED_40000:
+ ctrl |= priv->hw->link.xlgmii.speed40000;
+ break;
+ case SPEED_25000:
+ ctrl |= priv->hw->link.xlgmii.speed25000;
+ break;
+ case SPEED_10000:
+ ctrl |= priv->hw->link.xgmii.speed10000;
+ break;
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ default:
+ return;
+ }
+ } else {
+ switch (speed) {
+ case SPEED_2500:
+ ctrl |= priv->hw->link.speed2500;
+ break;
+ case SPEED_1000:
+ ctrl |= priv->hw->link.speed1000;
+ break;
+ case SPEED_100:
+ ctrl |= priv->hw->link.speed100;
+ break;
+ case SPEED_10:
+ ctrl |= priv->hw->link.speed10;
+ break;
+ default:
+ return;
+ }
+ }
+
+ priv->speed = speed;
+
+ if (priv->plat->fix_mac_speed)
+ priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed);
+
+ if (!duplex)
+ ctrl &= ~priv->hw->link.duplex;
+ else
+ ctrl |= priv->hw->link.duplex;
+
+ /* Flow Control operation */
+ if (rx_pause && tx_pause)
+ priv->flow_ctrl = FLOW_AUTO;
+ else if (rx_pause && !tx_pause)
+ priv->flow_ctrl = FLOW_RX;
+ else if (!rx_pause && tx_pause)
+ priv->flow_ctrl = FLOW_TX;
+ else
+ priv->flow_ctrl = FLOW_OFF;
+
+ stmmac_mac_flow_ctrl(priv, duplex);
+
+ if (ctrl != old_ctrl)
+ writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
+
+ stmmac_mac_set(priv, priv->ioaddr, true);
+ if (phy && priv->dma_cap.eee) {
+ priv->eee_active =
+ phy_init_eee(phy, !priv->plat->rx_clk_runs_in_lpi) >= 0;
+ priv->eee_enabled = stmmac_eee_init(priv);
+ priv->tx_lpi_enabled = priv->eee_enabled;
+ stmmac_set_eee_pls(priv, priv->hw, true);
+ }
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_link_state_handle(priv, true);
+}
+
+static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_select_pcs = stmmac_mac_select_pcs,
+ .mac_config = stmmac_mac_config,
+ .mac_link_down = stmmac_mac_link_down,
+ .mac_link_up = stmmac_mac_link_up,
+};
+
+/**
+ * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
+ * @priv: driver private structure
+ * Description: this is to verify if the HW supports the Physical Coding
+ * Sublayer (PCS), the interface that can be used when the MAC is
+ * configured for the TBI, RTBI, or SGMII PHY interface.
+ */
+static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
+{
+ int interface = priv->plat->interface;
+
+ if (priv->dma_cap.pcs) {
+ if ((interface == PHY_INTERFACE_MODE_RGMII) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
+ (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_RGMII;
+ } else if (interface == PHY_INTERFACE_MODE_SGMII) {
+ netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
+ priv->hw->pcs = STMMAC_PCS_SGMII;
+ }
+ }
+}
+
+/**
+ * stmmac_init_phy - PHY initialization
+ * @dev: net device structure
+ * Description: it initializes the driver's PHY state and attaches the PHY
+ * to the MAC driver.
+ * Return value:
+ * 0 on success
+ */
+static int stmmac_init_phy(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct fwnode_handle *phy_fwnode;
+ struct fwnode_handle *fwnode;
+ int ret;
+
+ if (!phylink_expects_phy(priv->phylink))
+ return 0;
+
+ fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ if (fwnode)
+ phy_fwnode = fwnode_get_phy_node(fwnode);
+ else
+ phy_fwnode = NULL;
+
+	/* Some DT bindings do not set up the PHY handle. Let's try to
+	 * parse it manually.
+	 */
+ if (!phy_fwnode || IS_ERR(phy_fwnode)) {
+ int addr = priv->plat->phy_addr;
+ struct phy_device *phydev;
+
+ if (addr < 0) {
+ netdev_err(priv->dev, "no phy found\n");
+ return -ENODEV;
+ }
+
+ phydev = mdiobus_get_phy(priv->mii, addr);
+ if (!phydev) {
+ netdev_err(priv->dev, "no phy at addr %d\n", addr);
+ return -ENODEV;
+ }
+
+ ret = phylink_connect_phy(priv->phylink, phydev);
+ } else {
+ fwnode_handle_put(phy_fwnode);
+ ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
+ }
+
+ if (!priv->plat->pmt) {
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
+
+ phylink_ethtool_get_wol(priv->phylink, &wol);
+ device_set_wakeup_capable(priv->device, !!wol.supported);
+ device_set_wakeup_enable(priv->device, !!wol.wolopts);
+ }
+
+ return ret;
+}
+
+static int stmmac_phy_setup(struct stmmac_priv *priv)
+{
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ int max_speed = priv->plat->max_speed;
+ int mode = priv->plat->phy_interface;
+ struct phylink *phylink;
+
+ priv->phylink_config.dev = &priv->dev->dev;
+ priv->phylink_config.type = PHYLINK_NETDEV;
+ if (priv->plat->mdio_bus_data)
+ priv->phylink_config.ovr_an_inband =
+ mdio_bus_data->xpcs_an_inband;
+
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ /* Set the platform/firmware specified interface mode */
+ __set_bit(mode, priv->phylink_config.supported_interfaces);
+
+ /* If we have an xpcs, it defines which PHY interfaces are supported. */
+ if (priv->hw->xpcs)
+ xpcs_get_interfaces(priv->hw->xpcs,
+ priv->phylink_config.supported_interfaces);
+
+ priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
+ MAC_10 | MAC_100;
+
+ if (!max_speed || max_speed >= 1000)
+ priv->phylink_config.mac_capabilities |= MAC_1000;
+
+ if (priv->plat->has_gmac4) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ } else if (priv->plat->has_xgmac) {
+ if (!max_speed || max_speed >= 2500)
+ priv->phylink_config.mac_capabilities |= MAC_2500FD;
+ if (!max_speed || max_speed >= 5000)
+ priv->phylink_config.mac_capabilities |= MAC_5000FD;
+ if (!max_speed || max_speed >= 10000)
+ priv->phylink_config.mac_capabilities |= MAC_10000FD;
+ if (!max_speed || max_speed >= 25000)
+ priv->phylink_config.mac_capabilities |= MAC_25000FD;
+ if (!max_speed || max_speed >= 40000)
+ priv->phylink_config.mac_capabilities |= MAC_40000FD;
+ if (!max_speed || max_speed >= 50000)
+ priv->phylink_config.mac_capabilities |= MAC_50000FD;
+ if (!max_speed || max_speed >= 100000)
+ priv->phylink_config.mac_capabilities |= MAC_100000FD;
+ }
+
+ /* Half-Duplex can only work with single queue */
+ if (priv->plat->tx_queues_to_use > 1)
+ priv->phylink_config.mac_capabilities &=
+ ~(MAC_10HD | MAC_100HD | MAC_1000HD);
+ priv->phylink_config.mac_managed_pm = true;
+
+ phylink = phylink_create(&priv->phylink_config, fwnode,
+ mode, &stmmac_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ priv->phylink = phylink;
+ return 0;
+}
+
+static void stmmac_display_rx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ unsigned int desc_size;
+ void *head_rx;
+ u32 queue;
+
+ /* Display RX rings */
+ for (queue = 0; queue < rx_cnt; queue++) {
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+
+ pr_info("\tRX Queue %u rings\n", queue);
+
+ if (priv->extend_desc) {
+ head_rx = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ head_rx = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ /* Display RX ring */
+ stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+}
+
+static void stmmac_display_tx_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ unsigned int desc_size;
+ void *head_tx;
+ u32 queue;
+
+ /* Display TX rings */
+ for (queue = 0; queue < tx_cnt; queue++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+
+ pr_info("\tTX Queue %d rings\n", queue);
+
+ if (priv->extend_desc) {
+ head_tx = (void *)tx_q->dma_etx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ head_tx = (void *)tx_q->dma_entx;
+ desc_size = sizeof(struct dma_edesc);
+ } else {
+ head_tx = (void *)tx_q->dma_tx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
+ tx_q->dma_tx_phy, desc_size);
+ }
+}
+
+static void stmmac_display_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ /* Display RX ring */
+ stmmac_display_rx_rings(priv, dma_conf);
+
+ /* Display TX ring */
+ stmmac_display_tx_rings(priv, dma_conf);
+}
+
+static int stmmac_set_bfsize(int mtu, int bufsize)
+{
+ int ret = bufsize;
+
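+	/* Illustrative example: an MTU of 3000 is >= BUF_SIZE_2KiB but
+	 * < BUF_SIZE_4KiB, so a 4 KiB DMA buffer is selected.
+	 */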
+ if (mtu >= BUF_SIZE_8KiB)
+ ret = BUF_SIZE_16KiB;
+ else if (mtu >= BUF_SIZE_4KiB)
+ ret = BUF_SIZE_8KiB;
+ else if (mtu >= BUF_SIZE_2KiB)
+ ret = BUF_SIZE_4KiB;
+ else if (mtu > DEFAULT_BUFSIZE)
+ ret = BUF_SIZE_2KiB;
+ else
+ ret = DEFAULT_BUFSIZE;
+
+ return ret;
+}
+
+/**
+ * stmmac_clear_rx_descriptors - clear RX descriptors
+ * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: RX queue index
+ * Description: this function is called to clear the RX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ int i;
+
+ /* Clear the RX descriptors */
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ if (priv->extend_desc)
+ stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
+ priv->use_riwt, priv->mode,
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
+ else
+ stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
+ priv->use_riwt, priv->mode,
+ (i == dma_conf->dma_rx_size - 1),
+ dma_conf->dma_buf_sz);
+}
+
+/**
+ * stmmac_clear_tx_descriptors - clear tx descriptors
+ * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index.
+ * Description: this function is called to clear the TX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+ int i;
+
+ /* Clear the TX descriptors */
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ int last = (i == (dma_conf->dma_tx_size - 1));
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &tx_q->dma_etx[i].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[i].basic;
+ else
+ p = &tx_q->dma_tx[i];
+
+ stmmac_init_tx_desc(priv, p, priv->mode, last);
+ }
+}
+
+/**
+ * stmmac_clear_descriptors - clear descriptors
+ * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
+ * Description: this function is called to clear the TX and RX descriptors,
+ * whether basic or extended descriptors are used.
+ */
+static void stmmac_clear_descriptors(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Clear the RX descriptors */
+ for (queue = 0; queue < rx_queue_cnt; queue++)
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
+
+ /* Clear the TX descriptors */
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ stmmac_clear_tx_descriptors(priv, dma_conf, queue);
+}
+
+/**
+ * stmmac_init_rx_buffers - init the RX descriptor buffer.
+ * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
+ * @p: descriptor pointer
+ * @i: descriptor index
+ * @flags: gfp flag
+ * @queue: RX queue index
+ * Description: this function is called to allocate a receive buffer, perform
+ * the DMA mapping and init the descriptor.
+ */
+static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ struct dma_desc *p,
+ int i, gfp_t flags, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.host_dma_width <= 32)
+ gfp |= GFP_DMA32;
+
+ if (!buf->page) {
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->page)
+ return -ENOMEM;
+ buf->page_offset = stmmac_rx_offset(priv);
+ }
+
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->sec_page)
+ return -ENOMEM;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ } else {
+ buf->sec_page = NULL;
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ }
+
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
+ stmmac_init_desc3(priv, p);
+
+ return 0;
+}
+
+/**
+ * stmmac_free_rx_buffer - free an RX dma buffer
+ * @priv: private structure
+ * @rx_q: RX queue
+ * @i: buffer index.
+ */
+static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
+ struct stmmac_rx_queue *rx_q,
+ int i)
+{
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (buf->page)
+ page_pool_put_full_page(rx_q->page_pool, buf->page, false);
+ buf->page = NULL;
+
+ if (buf->sec_page)
+ page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
+ buf->sec_page = NULL;
+}
+
+/**
+ * stmmac_free_tx_buffer - free a TX dma buffer
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
+ * @i: buffer index.
+ */
+static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, int i)
+{
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+
+ if (tx_q->tx_skbuff_dma[i].buf &&
+ tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
+ if (tx_q->tx_skbuff_dma[i].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[i].buf,
+ tx_q->tx_skbuff_dma[i].len,
+ DMA_TO_DEVICE);
+ }
+
+ if (tx_q->xdpf[i] &&
+ (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
+ xdp_return_frame(tx_q->xdpf[i]);
+ tx_q->xdpf[i] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff[i] &&
+ tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
+ dev_kfree_skb_any(tx_q->tx_skbuff[i]);
+ tx_q->tx_skbuff[i] = NULL;
+ }
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+}
+
+/**
+ * dma_free_rx_skbufs - free RX dma buffers
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: RX queue index
+ */
+static void dma_free_rx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < dma_conf->dma_rx_size; i++)
+ stmmac_free_rx_buffer(priv, rx_q, i);
+}
+
+static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
+ struct dma_desc *p;
+ int ret;
+
+ if (priv->extend_desc)
+ p = &((rx_q->dma_erx + i)->basic);
+ else
+ p = rx_q->dma_rx + i;
+
+ ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
+ queue);
+ if (ret)
+ return ret;
+
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+/**
+ * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: RX queue index
+ */
+static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+ if (!buf->xdp)
+ continue;
+
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ }
+}
+
+static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ int i;
+
+ for (i = 0; i < dma_conf->dma_rx_size; i++) {
+ struct stmmac_rx_buffer *buf;
+ dma_addr_t dma_addr;
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + i);
+ else
+ p = rx_q->dma_rx + i;
+
+ buf = &rx_q->buf_pool[i];
+
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp)
+ return -ENOMEM;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, p, dma_addr);
+ rx_q->buf_alloc_num++;
+ }
+
+ return 0;
+}
+
+static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
+{
+ if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
+ return NULL;
+
+ return xsk_get_pool_from_qid(priv->dev, queue);
+}
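+
+/* Usage sketch (illustrative only): a queue is backed by an XSK pool
+ * only when XDP is enabled and user space has bound an AF_XDP
+ * zero-copy socket to that queue id, so for a plain queue the helper
+ * simply returns NULL:
+ *
+ *	pool = stmmac_get_xsk_pool(priv, 0);	NULL unless queue 0
+ *						is bound to a ZC pool
+ */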
+
+/**
+ * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: RX queue index
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue, gfp_t flags)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ int ret;
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_rx_phy=0x%08x\n", __func__,
+ (u32)rx_q->dma_rx_phy);
+
+ stmmac_clear_rx_descriptors(priv, dma_conf, queue);
+
+ xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
+
+ rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
+
+ if (rx_q->xsk_pool) {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_XSK_BUFF_POOL,
+ NULL));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
+ } else {
+ WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
+ MEM_TYPE_PAGE_POOL,
+ rx_q->page_pool));
+ netdev_info(priv->dev,
+ "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
+ rx_q->queue_index);
+ }
+
+ if (rx_q->xsk_pool) {
+ /* RX XDP ZC buffer pool may not be populated, e.g.
+ * xdpsock TX-only.
+ */
+ stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
+ } else {
+ ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
+ if (ret < 0)
+ return -ENOMEM;
+ }
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, rx_q->dma_erx,
+ rx_q->dma_rx_phy,
+ dma_conf->dma_rx_size, 1);
+ else
+ stmmac_mode_init(priv, rx_q->dma_rx,
+ rx_q->dma_rx_phy,
+ dma_conf->dma_rx_size, 0);
+ }
+
+ return 0;
+}
+
+static int init_dma_rx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ int queue;
+ int ret;
+
+ /* RX INITIALIZATION */
+ netif_dbg(priv, probe, priv->dev,
+ "SKB addresses:\nskb\t\tskb data\tdma data\n");
+
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
+ if (ret)
+ goto err_init_rx_buffers;
+ }
+
+ return 0;
+
+err_init_rx_buffers:
+ while (queue >= 0) {
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
+ else
+ dma_free_rx_skbufs(priv, dma_conf, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ queue--;
+ }
+
+ return ret;
+}
+
+/**
+ * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
+ * @priv: driver private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
+ * Description: this function initializes the DMA TX descriptors
+ * and clears the per-descriptor bookkeeping; no buffers are allocated
+ * here. It supports the chained and ring modes.
+ */
+static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+ int i;
+
+ netif_dbg(priv, probe, priv->dev,
+ "(%s) dma_tx_phy=0x%08x\n", __func__,
+ (u32)tx_q->dma_tx_phy);
+
+ /* Setup the chained descriptor addresses */
+ if (priv->mode == STMMAC_CHAIN_MODE) {
+ if (priv->extend_desc)
+ stmmac_mode_init(priv, tx_q->dma_etx,
+ tx_q->dma_tx_phy,
+ dma_conf->dma_tx_size, 1);
+ else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
+ stmmac_mode_init(priv, tx_q->dma_tx,
+ tx_q->dma_tx_phy,
+ dma_conf->dma_tx_size, 0);
+ }
+
+ tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
+
+ for (i = 0; i < dma_conf->dma_tx_size; i++) {
+ struct dma_desc *p;
+
+ if (priv->extend_desc)
+ p = &((tx_q->dma_etx + i)->basic);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &((tx_q->dma_entx + i)->basic);
+ else
+ p = tx_q->dma_tx + i;
+
+ stmmac_clear_desc(priv, p);
+
+ tx_q->tx_skbuff_dma[i].buf = 0;
+ tx_q->tx_skbuff_dma[i].map_as_page = false;
+ tx_q->tx_skbuff_dma[i].len = 0;
+ tx_q->tx_skbuff_dma[i].last_segment = false;
+ tx_q->tx_skbuff[i] = NULL;
+ }
+
+ return 0;
+}
+
+static int init_dma_tx_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_queue_cnt;
+ u32 queue;
+
+ tx_queue_cnt = priv->plat->tx_queues_to_use;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ __init_dma_tx_desc_rings(priv, dma_conf, queue);
+
+ return 0;
+}
+
+/**
+ * init_dma_desc_rings - init the RX/TX descriptor rings
+ * @dev: net device structure
+ * @dma_conf: structure to take the dma data
+ * @flags: gfp flag.
+ * Description: this function initializes the DMA RX/TX descriptors
+ * and allocates the socket buffers. It supports the chained and ring
+ * modes.
+ */
+static int init_dma_desc_rings(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf,
+ gfp_t flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
+ if (ret)
+ return ret;
+
+ ret = init_dma_tx_desc_rings(dev, dma_conf);
+
+ stmmac_clear_descriptors(priv, dma_conf);
+
+ if (netif_msg_hw(priv))
+ stmmac_display_rings(priv, dma_conf);
+
+ return ret;
+}
+
+/**
+ * dma_free_tx_skbufs - free TX dma buffers
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
+ */
+static void dma_free_tx_skbufs(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+ int i;
+
+ tx_q->xsk_frames_done = 0;
+
+ for (i = 0; i < dma_conf->dma_tx_size; i++)
+ stmmac_free_tx_buffer(priv, dma_conf, queue, i);
+
+ if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+ tx_q->xsk_frames_done = 0;
+ tx_q->xsk_pool = NULL;
+ }
+}
+
+/**
+ * stmmac_free_tx_skbufs - free TX skb buffers
+ * @priv: private structure
+ */
+static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
+{
+ u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queue_cnt; queue++)
+ dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
+}
+
+/**
+ * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: RX queue index
+ */
+static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+
+ /* Release the DMA RX socket buffers */
+ if (rx_q->xsk_pool)
+ dma_free_rx_xskbufs(priv, dma_conf, queue);
+ else
+ dma_free_rx_skbufs(priv, dma_conf, queue);
+
+ rx_q->buf_alloc_num = 0;
+ rx_q->xsk_pool = NULL;
+
+ /* Free DMA regions of consistent memory previously allocated */
+ if (!priv->extend_desc)
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
+ sizeof(struct dma_desc),
+ rx_q->dma_rx, rx_q->dma_rx_phy);
+ else
+ dma_free_coherent(priv->device, dma_conf->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ rx_q->dma_erx, rx_q->dma_rx_phy);
+
+ if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
+ xdp_rxq_info_unreg(&rx_q->xdp_rxq);
+
+ kfree(rx_q->buf_pool);
+ if (rx_q->page_pool)
+ page_pool_destroy(rx_q->page_pool);
+}
+
+static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+
+ /* Free RX queue resources */
+ for (queue = 0; queue < rx_count; queue++)
+ __free_dma_rx_desc_resources(priv, dma_conf, queue);
+}
+
+/**
+ * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
+ */
+static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+ size_t size;
+ void *addr;
+
+ /* Release the DMA TX socket buffers */
+ dma_free_tx_skbufs(priv, dma_conf, queue);
+
+ if (priv->extend_desc) {
+ size = sizeof(struct dma_extended_desc);
+ addr = tx_q->dma_etx;
+ } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
+ size = sizeof(struct dma_edesc);
+ addr = tx_q->dma_entx;
+ } else {
+ size = sizeof(struct dma_desc);
+ addr = tx_q->dma_tx;
+ }
+
+ size *= dma_conf->dma_tx_size;
+
+ dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
+
+ kfree(tx_q->tx_skbuff_dma);
+ kfree(tx_q->tx_skbuff);
+}
+
+static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ /* Free TX queue resources */
+ for (queue = 0; queue < tx_count; queue++)
+ __free_dma_tx_desc_resources(priv, dma_conf, queue);
+}
+
+/**
+ * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: RX queue index
+ * Description: according to which descriptor type is in use (extended or
+ * basic), this function allocates the RX resources for the given queue.
+ * It pre-allocates the RX buffers via a page pool in order to allow the
+ * zero-copy mechanism.
+ */
+static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ bool xdp_prog = stmmac_xdp_is_enabled(priv);
+ struct page_pool_params pp_params = { 0 };
+ unsigned int num_pages;
+ unsigned int napi_id;
+ int ret;
+
+ rx_q->queue_index = queue;
+ rx_q->priv_data = priv;
+
+ pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+ pp_params.pool_size = dma_conf->dma_rx_size;
+ num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
+ pp_params.order = ilog2(num_pages);
+ pp_params.nid = dev_to_node(priv->device);
+ pp_params.dev = priv->device;
+ pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
+ pp_params.offset = stmmac_rx_offset(priv);
+ pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
+
+ rx_q->page_pool = page_pool_create(&pp_params);
+ if (IS_ERR(rx_q->page_pool)) {
+ ret = PTR_ERR(rx_q->page_pool);
+ rx_q->page_pool = NULL;
+ return ret;
+ }
+
+ rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
+ sizeof(*rx_q->buf_pool),
+ GFP_KERNEL);
+ if (!rx_q->buf_pool)
+ return -ENOMEM;
+
+ if (priv->extend_desc) {
+ rx_q->dma_erx = dma_alloc_coherent(priv->device,
+ dma_conf->dma_rx_size *
+ sizeof(struct dma_extended_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_erx)
+ return -ENOMEM;
+
+ } else {
+ rx_q->dma_rx = dma_alloc_coherent(priv->device,
+ dma_conf->dma_rx_size *
+ sizeof(struct dma_desc),
+ &rx_q->dma_rx_phy,
+ GFP_KERNEL);
+ if (!rx_q->dma_rx)
+ return -ENOMEM;
+ }
+
+ if (stmmac_xdp_is_enabled(priv) &&
+ test_bit(queue, priv->af_xdp_zc_qps))
+ napi_id = ch->rxtx_napi.napi_id;
+ else
+ napi_id = ch->rx_napi.napi_id;
+
+ ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
+ rx_q->queue_index,
+ napi_id);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to register xdp rxq info\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
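+
+/* Worked example for the page_pool sizing above (illustrative,
+ * assuming PAGE_SIZE = 4 KiB): with dma_buf_sz = 16 KiB,
+ * num_pages = DIV_ROUND_UP(16384, 4096) = 4, so pp_params.order =
+ * ilog2(4) = 2 and every pool allocation is a 4-page compound page.
+ */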
+
+static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* RX queues buffers and DMA */
+ for (queue = 0; queue < rx_count; queue++) {
+ ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
+ if (ret)
+ goto err_dma;
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_rx_desc_resources(priv, dma_conf);
+
+ return ret;
+}
+
+/**
+ * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * @queue: TX queue index
+ * Description: according to which descriptor type is in use (extended or
+ * basic), this function allocates the TX resources for the given queue:
+ * the descriptor ring and the per-descriptor bookkeeping arrays.
+ */
+static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf,
+ u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
+ size_t size;
+ void *addr;
+
+ tx_q->queue_index = queue;
+ tx_q->priv_data = priv;
+
+ tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
+ sizeof(*tx_q->tx_skbuff_dma),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff_dma)
+ return -ENOMEM;
+
+ tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
+ sizeof(struct sk_buff *),
+ GFP_KERNEL);
+ if (!tx_q->tx_skbuff)
+ return -ENOMEM;
+
+ if (priv->extend_desc)
+ size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ size = sizeof(struct dma_edesc);
+ else
+ size = sizeof(struct dma_desc);
+
+ size *= dma_conf->dma_tx_size;
+
+ addr = dma_alloc_coherent(priv->device, size,
+ &tx_q->dma_tx_phy, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ if (priv->extend_desc)
+ tx_q->dma_etx = addr;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_q->dma_entx = addr;
+ else
+ tx_q->dma_tx = addr;
+
+ return 0;
+}
+
+static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ int ret;
+
+ /* TX queues buffers and DMA */
+ for (queue = 0; queue < tx_count; queue++) {
+ ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
+ if (ret)
+ goto err_dma;
+ }
+
+ return 0;
+
+err_dma:
+ free_dma_tx_desc_resources(priv, dma_conf);
+ return ret;
+}
+
+/**
+ * alloc_dma_desc_resources - alloc TX/RX resources.
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ * Description: according to which descriptor type is in use (extended or
+ * basic), this function allocates the resources for the TX and RX paths.
+ * In case of reception, for example, it pre-allocates the RX buffers in
+ * order to allow the zero-copy mechanism.
+ */
+static int alloc_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ /* RX Allocation */
+ int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
+
+ if (ret)
+ return ret;
+
+ ret = alloc_dma_tx_desc_resources(priv, dma_conf);
+
+ return ret;
+}
+
+/**
+ * free_dma_desc_resources - free dma desc resources
+ * @priv: private structure
+ * @dma_conf: structure to take the dma data
+ */
+static void free_dma_desc_resources(struct stmmac_priv *priv,
+ struct stmmac_dma_conf *dma_conf)
+{
+ /* Release the DMA TX socket buffers */
+ free_dma_tx_desc_resources(priv, dma_conf);
+
+ /* Release the DMA RX socket buffers later
+ * to ensure all pending XDP_TX buffers are returned.
+ */
+ free_dma_rx_desc_resources(priv, dma_conf);
+}
+
+/**
+ * stmmac_mac_enable_rx_queues - Enable MAC rx queues
+ * @priv: driver private structure
+ * Description: It is used for enabling the rx queues in the MAC
+ */
+static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ int queue;
+ u8 mode;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
+ stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
+ }
+}
+
+/**
+ * stmmac_start_rx_dma - start RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This starts an RX DMA channel
+ */
+static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
+ stmmac_start_rx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_start_tx_dma - start TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This starts a TX DMA channel
+ */
+static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
+ stmmac_start_tx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_rx_dma - stop RX DMA channel
+ * @priv: driver private structure
+ * @chan: RX channel index
+ * Description:
+ * This stops a RX DMA channel
+ */
+static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
+ stmmac_stop_rx(priv, priv->ioaddr, chan);
+}
+
+/**
+ * stmmac_stop_tx_dma - stop TX DMA channel
+ * @priv: driver private structure
+ * @chan: TX channel index
+ * Description:
+ * This stops a TX DMA channel
+ */
+static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
+{
+ netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
+ stmmac_stop_tx(priv, priv->ioaddr, chan);
+}
+
+static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ u32 chan;
+
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ struct stmmac_channel *ch = &priv->channel[chan];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+}
+
+/**
+ * stmmac_start_all_dma - start all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This starts all the RX and TX DMA channels
+ */
+static void stmmac_start_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_start_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_start_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_stop_all_dma - stop all RX and TX DMA channels
+ * @priv: driver private structure
+ * Description:
+ * This stops the RX and TX DMA channels
+ */
+static void stmmac_stop_all_dma(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan = 0;
+
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_stop_rx_dma(priv, chan);
+
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_stop_tx_dma(priv, chan);
+}
+
+/**
+ * stmmac_dma_operation_mode - HW DMA operation mode
+ * @priv: driver private structure
+ * Description: it is used for configuring the DMA operation mode register in
+ * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
+ */
+static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
+ u32 txmode = 0;
+ u32 rxmode = 0;
+ u32 chan = 0;
+ u8 qmode = 0;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+
+ if (priv->plat->force_thresh_dma_mode) {
+ txmode = tc;
+ rxmode = tc;
+ } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
+ /*
+ * In case of GMAC, SF mode can be enabled
+ * to perform the TX COE in HW. This depends on:
+ * 1) TX COE being actually supported;
+ * 2) there being no buggy Jumbo frame support
+ * that requires not inserting the csum in the TDES.
+ */
+ txmode = SF_DMA_MODE;
+ rxmode = SF_DMA_MODE;
+ priv->xstats.threshold = SF_DMA_MODE;
+ } else {
+ txmode = tc;
+ rxmode = SF_DMA_MODE;
+ }
+
+ /* configure all channels */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ u32 buf_size;
+
+ qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
+ rxfifosz, qmode);
+
+ if (rx_q->xsk_pool) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ chan);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_conf.dma_buf_sz,
+ chan);
+ }
+ }
+
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
+ txfifosz, qmode);
+ }
+}
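+
+/* Hedged example of the per-queue FIFO split above: assuming a 16 KiB
+ * RX FIFO (rxfifosz = 16384) shared by four RX channels, each channel
+ * is programmed with rxfifosz / 4 = 4096 bytes; the TX side is split
+ * the same way.
+ */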
+
+static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct xsk_buff_pool *pool = tx_q->xsk_pool;
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc = NULL;
+ struct xdp_desc xdp_desc;
+ bool work_done = true;
+
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ budget = min(budget, stmmac_tx_avail(priv, queue));
+
+ while (budget-- > 0) {
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+ /* We are sharing with the slow path and stop XSK TX desc submission
+ * when the available TX ring space drops below the threshold.
+ */
+ if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
+ !netif_carrier_ok(priv->dev)) {
+ work_done = false;
+ break;
+ }
+
+ if (!xsk_tx_peek_desc(pool, &xdp_desc))
+ break;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
+ dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
+ xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
+
+ /* To return XDP buffer to XSK pool, we simply call
+ * xsk_tx_completed(), so we don't need to fill up
+ * 'buf' and 'xdpf'.
+ */
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->xdpf[entry] = NULL;
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ tx_q->tx_count_frames++;
+
+ if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
+ true, priv->mode, true, true,
+ xdp_desc.len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+ entry = tx_q->cur_tx;
+ }
+
+ if (tx_desc) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ xsk_tx_release(pool);
+ }
+
+ /* Return true if both of the following conditions are met:
+ * a) TX budget is still available, and
+ * b) work_done is true, i.e. the XSK TX desc peek came up empty
+ * (no more pending XSK TX frames to transmit).
+ */
+ return !!budget && work_done;
+}
+
+static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
+{
+ if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
+ tc += 64;
+
+ if (priv->plat->force_thresh_dma_mode)
+ stmmac_set_dma_operation_mode(priv, tc, tc, chan);
+ else
+ stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
+ chan);
+
+ priv->xstats.threshold = tc;
+ }
+}
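+
+/* Illustrative walk-through: assuming the driver's default threshold
+ * tc = 64, successive bumps move it through 128, 192, 256 and finally
+ * 320, after which the "tc <= 256" guard stops any further increase.
+ */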
+
+/**
+ * stmmac_tx_clean - to manage the transmission completion
+ * @priv: driver private structure
+ * @budget: napi budget limiting this function's packet handling
+ * @queue: TX queue index
+ * Description: it reclaims the transmit resources after transmission completes.
+ */
+static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ unsigned int entry, xmits = 0, count = 0;
+
+ __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ priv->xstats.tx_clean++;
+
+ tx_q->xsk_frames_done = 0;
+
+ entry = tx_q->dirty_tx;
+
+ /* Try to clean all completed TX frames in one shot */
+ while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
+ struct xdp_frame *xdpf;
+ struct sk_buff *skb;
+ struct dma_desc *p;
+ int status;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdpf = tx_q->xdpf[entry];
+ skb = NULL;
+ } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ xdpf = NULL;
+ skb = tx_q->tx_skbuff[entry];
+ } else {
+ xdpf = NULL;
+ skb = NULL;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[entry].basic;
+ else
+ p = tx_q->dma_tx + entry;
+
+ status = stmmac_tx_status(priv, &priv->dev->stats,
+ &priv->xstats, p, priv->ioaddr);
+ /* Check if the descriptor is owned by the DMA */
+ if (unlikely(status & tx_dma_own))
+ break;
+
+ count++;
+
+ /* Make sure descriptor fields are read after reading
+ * the own bit.
+ */
+ dma_rmb();
+
+ /* Just consider the last segment and ...*/
+ if (likely(!(status & tx_not_ls))) {
+ /* ... verify the status error condition */
+ if (unlikely(status & tx_err)) {
+ priv->dev->stats.tx_errors++;
+ if (unlikely(status & tx_err_bump_tc))
+ stmmac_bump_dma_threshold(priv, queue);
+ } else {
+ priv->dev->stats.tx_packets++;
+ priv->xstats.tx_pkt_n++;
+ priv->xstats.txq_stats[queue].tx_pkt_n++;
+ }
+ if (skb)
+ stmmac_get_tx_hwtstamp(priv, p, skb);
+ }
+
+ if (likely(tx_q->tx_skbuff_dma[entry].buf &&
+ tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
+ if (tx_q->tx_skbuff_dma[entry].map_as_page)
+ dma_unmap_page(priv->device,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(priv->device,
+ tx_q->tx_skbuff_dma[entry].buf,
+ tx_q->tx_skbuff_dma[entry].len,
+ DMA_TO_DEVICE);
+ tx_q->tx_skbuff_dma[entry].buf = 0;
+ tx_q->tx_skbuff_dma[entry].len = 0;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ }
+
+ stmmac_clean_desc3(priv, tx_q, p);
+
+ tx_q->tx_skbuff_dma[entry].last_segment = false;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
+ xdp_return_frame_rx_napi(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (xdpf &&
+ tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
+ xdp_return_frame(xdpf);
+ tx_q->xdpf[entry] = NULL;
+ }
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
+ tx_q->xsk_frames_done++;
+
+ if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
+ if (likely(skb)) {
+ pkts_compl++;
+ bytes_compl += skb->len;
+ dev_consume_skb_any(skb);
+ tx_q->tx_skbuff[entry] = NULL;
+ }
+ }
+
+ stmmac_release_tx_desc(priv, p, priv->mode);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ }
+ tx_q->dirty_tx = entry;
+
+ netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
+ pkts_compl, bytes_compl);
+
+ if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
+ queue))) &&
+ stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
+
+ netif_dbg(priv, tx_done, priv->dev,
+ "%s: restart transmit\n", __func__);
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ if (tx_q->xsk_pool) {
+ bool work_done;
+
+ if (tx_q->xsk_frames_done)
+ xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
+
+ if (xsk_uses_need_wakeup(tx_q->xsk_pool))
+ xsk_set_tx_need_wakeup(tx_q->xsk_pool);
+
+ /* For XSK TX, we try to send as many as possible.
+ * If XSK work done (XSK TX desc empty and budget still
+ * available), return "budget - 1" to reenable TX IRQ.
+ * Else, return "budget" to make NAPI continue polling.
+ */
+ work_done = stmmac_xdp_xmit_zc(priv, queue,
+ STMMAC_XSK_TX_BUDGET_MAX);
+ if (work_done)
+ xmits = budget - 1;
+ else
+ xmits = budget;
+ }
+
+ if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
+ priv->eee_sw_timer_en) {
+ if (stmmac_enable_eee_mode(priv))
+ mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
+ }
+
+ /* We still have pending packets, let's call for a new scheduling */
+ if (tx_q->dirty_tx != tx_q->cur_tx)
+ stmmac_tx_timer_arm(priv, queue);
+
+ __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
+
+ /* Combine decisions from TX clean and XSK TX */
+ return max(count, xmits);
+}
+
+/**
+ * stmmac_tx_err - to manage the tx error
+ * @priv: driver private structure
+ * @chan: channel index
+ * Description: it cleans the descriptors and restarts the transmission
+ * in case of transmission errors.
+ */
+static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
+
+ stmmac_stop_tx_dma(priv, chan);
+ dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
+ stmmac_reset_tx_queue(priv, chan);
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+ stmmac_start_tx_dma(priv, chan);
+
+ priv->dev->stats.tx_errors++;
+ netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
+}
+
+/**
+ * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
+ * @priv: driver private structure
+ * @txmode: TX operating mode
+ * @rxmode: RX operating mode
+ * @chan: channel index
+ * Description: it is used for configuring the DMA operation mode at
+ * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
+ * mode.
+ */
+static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
+ u32 rxmode, u32 chan)
+{
+ u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
+ u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ int rxfifosz = priv->plat->rx_fifo_size;
+ int txfifosz = priv->plat->tx_fifo_size;
+
+ if (rxfifosz == 0)
+ rxfifosz = priv->dma_cap.rx_fifo_size;
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ /* Adjust for real per queue fifo size */
+ rxfifosz /= rx_channels_count;
+ txfifosz /= tx_channels_count;
+
+ stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
+ stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
+}
+
+static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
+{
+ int ret;
+
+ ret = stmmac_safety_feat_irq_status(priv, priv->dev,
+ priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
+ if (ret && (ret != -EINVAL)) {
+ stmmac_global_err(priv);
+ return true;
+ }
+
+ return false;
+}
+
+static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
+{
+ int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
+ &priv->xstats, chan, dir);
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ struct stmmac_channel *ch = &priv->channel[chan];
+ struct napi_struct *rx_napi;
+ struct napi_struct *tx_napi;
+ unsigned long flags;
+
+ rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
+ tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
+ if (napi_schedule_prep(rx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(rx_napi);
+ }
+ }
+
+ if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
+ if (napi_schedule_prep(tx_napi)) {
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(tx_napi);
+ }
+ }
+
+ return status;
+}
+
+/**
+ * stmmac_dma_interrupt - DMA ISR
+ * @priv: driver private structure
+ * Description: this is the DMA ISR. It is called by the main ISR.
+ * It calls the dwmac dma routine and schedules the poll method when
+ * there is work to be done.
+ */
+static void stmmac_dma_interrupt(struct stmmac_priv *priv)
+{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 channels_to_check = tx_channel_count > rx_channel_count ?
+ tx_channel_count : rx_channel_count;
+ u32 chan;
+ int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
+
+ /* Make sure we never check beyond our status buffer. */
+ if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
+ channels_to_check = ARRAY_SIZE(status);
+
+ for (chan = 0; chan < channels_to_check; chan++)
+ status[chan] = stmmac_napi_check(priv, chan,
+ DMA_DIR_RXTX);
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ stmmac_bump_dma_threshold(priv, chan);
+ } else if (unlikely(status[chan] == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+ }
+}
+
+/**
+ * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
+ * @priv: driver private structure
+ * Description: this masks the MMC irq, since the counters are managed in SW.
+ */
+static void stmmac_mmc_setup(struct stmmac_priv *priv)
+{
+ unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
+ MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
+
+ stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
+
+ if (priv->dma_cap.rmon) {
+ stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
+ memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
+ } else
+ netdev_info(priv->dev, "No MAC Management Counters available\n");
+}
+
+/**
+ * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
+ * @priv: driver private structure
+ * Description:
+ * newer GMAC chip generations have a register to indicate the
+ * presence of the optional features/functions.
+ * This can also be used to override the values passed through the
+ * platform, which is necessary for old MAC10/100 and GMAC chips.
+ */
+static int stmmac_get_hw_features(struct stmmac_priv *priv)
+{
+ return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
+}
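+
+/* Usage sketch (illustrative): the helper folds the callback's
+ * 0-on-success convention into a boolean, so a caller can write:
+ *
+ *	if (stmmac_get_hw_features(priv))
+ *		dev_info(priv->device, "DMA HW capability register supported\n");
+ */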
+
+/**
+ * stmmac_check_ether_addr - check if the MAC addr is valid
+ * @priv: driver private structure
+ * Description:
+ * it verifies that the MAC address is valid; if it is not, it
+ * generates a random MAC address
+ */
+static void stmmac_check_ether_addr(struct stmmac_priv *priv)
+{
+ u8 addr[ETH_ALEN];
+
+ if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+ stmmac_get_umac_addr(priv, priv->hw, addr, 0);
+ if (is_valid_ether_addr(addr))
+ eth_hw_addr_set(priv->dev, addr);
+ else
+ eth_hw_addr_random(priv->dev);
+ dev_info(priv->device, "device MAC address %pM\n",
+ priv->dev->dev_addr);
+ }
+}
+
+/**
+ * stmmac_init_dma_engine - DMA init.
+ * @priv: driver private structure
+ * Description:
+ * It initializes the DMA by invoking the specific MAC/GMAC callback.
+ * Some DMA parameters can be passed from the platform;
+ * if they are not passed, a default is kept for the MAC or GMAC.
+ */
+static int stmmac_init_dma_engine(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 chan = 0;
+ int atds = 0;
+ int ret = 0;
+
+ if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
+ dev_err(priv->device, "Invalid DMA configuration\n");
+ return -EINVAL;
+ }
+
+ if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
+ atds = 1;
+
+ ret = stmmac_reset(priv, priv->ioaddr);
+ if (ret) {
+ dev_err(priv->device, "Failed to reset the dma\n");
+ return ret;
+ }
+
+ /* DMA Configuration */
+ stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
+
+ if (priv->plat->axi)
+ stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_channels_count; chan++) {
+ rx_q = &priv->dma_conf.rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_channels_count; chan++) {
+ tx_q = &priv->dma_conf.tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+ }
+
+ return ret;
+}
+
+static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ u32 tx_coal_timer = priv->tx_coal_timer[queue];
+
+ if (!tx_coal_timer)
+ return;
+
+ hrtimer_start(&tx_q->txtimer,
+ STMMAC_COAL_TIMER(tx_coal_timer),
+ HRTIMER_MODE_REL);
+}
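+
+/* Illustrative: assuming the default coalesce timer of 1000 usec set
+ * in stmmac_init_coalesce() below, the hrtimer armed here fires about
+ * 1 ms later and kicks the TX NAPI via stmmac_tx_timer().
+ */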
+
+/**
+ * stmmac_tx_timer - mitigation sw timer for tx.
+ * @t: data pointer
+ * Description:
+ * This is the timer handler that schedules the NAPI poll which, in
+ * turn, runs stmmac_tx_clean.
+ */
+static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
+{
+ struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
+ struct stmmac_priv *priv = tx_q->priv_data;
+ struct stmmac_channel *ch;
+ struct napi_struct *napi;
+
+ ch = &priv->channel[tx_q->queue_index];
+ napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
+
+ if (likely(napi_schedule_prep(napi))) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ __napi_schedule(napi);
+ }
+
+ return HRTIMER_NORESTART;
+}
+
+/**
+ * stmmac_init_coalesce - init mitigation options.
+ * @priv: driver private structure
+ * Description:
+ * This inits the coalesce parameters: i.e. timer rate,
+ * timer handler and default threshold used for enabling the
+ * interrupt on completion bit.
+ */
+static void stmmac_init_coalesce(struct stmmac_priv *priv)
+{
+ u32 tx_channel_count = priv->plat->tx_queues_to_use;
+ u32 rx_channel_count = priv->plat->rx_queues_to_use;
+ u32 chan;
+
+ for (chan = 0; chan < tx_channel_count; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+
+ priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
+ priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ for (chan = 0; chan < rx_channel_count; chan++)
+ priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
+}
+
+static void stmmac_set_rings_length(struct stmmac_priv *priv)
+{
+ u32 rx_channels_count = priv->plat->rx_queues_to_use;
+ u32 tx_channels_count = priv->plat->tx_queues_to_use;
+ u32 chan;
+
+ /* set TX ring length */
+ for (chan = 0; chan < tx_channels_count; chan++)
+ stmmac_set_tx_ring_len(priv, priv->ioaddr,
+ (priv->dma_conf.dma_tx_size - 1), chan);
+
+ /* set RX ring length */
+ for (chan = 0; chan < rx_channels_count; chan++)
+ stmmac_set_rx_ring_len(priv, priv->ioaddr,
+ (priv->dma_conf.dma_rx_size - 1), chan);
+}
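+
+/* Illustrative: the ring-length registers expect "size - 1", so with
+ * the common default of 512 descriptors per ring the hardware is
+ * programmed with 511.
+ */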
+
+/**
+ * stmmac_set_tx_queue_weight - Set TX queue weight
+ * @priv: driver private structure
+ * Description: It is used for setting the TX queue weights
+ */
+static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 weight;
+ u32 queue;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ weight = priv->plat->tx_queues_cfg[queue].weight;
+ stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
+ }
+}
+
+/**
+ * stmmac_configure_cbs - Configure CBS in TX queue
+ * @priv: driver private structure
+ * Description: It is used for configuring CBS in AVB TX queues
+ */
+static void stmmac_configure_cbs(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 mode_to_use;
+ u32 queue;
+
+ /* queue 0 is reserved for legacy traffic */
+ for (queue = 1; queue < tx_queues_count; queue++) {
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB)
+ continue;
+
+ stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ }
+}
+
+/**
+ * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
+ * @priv: driver private structure
+ * Description: It is used for mapping RX queues to RX dma channels
+ */
+static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 chan;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ chan = priv->plat->rx_queues_cfg[queue].chan;
+ stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX Queue Priority
+ */
+static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ if (!priv->plat->rx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->rx_queues_cfg[queue].prio;
+ stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
+ * @priv: driver private structure
+ * Description: It is used for configuring the TX Queue Priority
+ */
+static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+ u32 prio;
+
+ for (queue = 0; queue < tx_queues_count; queue++) {
+ if (!priv->plat->tx_queues_cfg[queue].use_prio)
+ continue;
+
+ prio = priv->plat->tx_queues_cfg[queue].prio;
+ stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
+ }
+}
+
+/**
+ * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
+ * @priv: driver private structure
+ * Description: It is used for configuring the RX queue routing
+ */
+static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 queue;
+ u8 packet;
+
+ for (queue = 0; queue < rx_queues_count; queue++) {
+ /* no specific packet type routing specified for the queue */
+ if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
+ continue;
+
+ packet = priv->plat->rx_queues_cfg[queue].pkt_route;
+ stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
+ }
+}
+
+static void stmmac_mac_config_rss(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
+ priv->rss.enable = false;
+ return;
+ }
+
+ if (priv->dev->features & NETIF_F_RXHASH)
+ priv->rss.enable = true;
+ else
+ priv->rss.enable = false;
+
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+}
+
+/**
+ * stmmac_mtl_configuration - Configure MTL
+ * @priv: driver private structure
+ * Description: It is used for configuring MTL
+ */
+static void stmmac_mtl_configuration(struct stmmac_priv *priv)
+{
+ u32 rx_queues_count = priv->plat->rx_queues_to_use;
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+
+ if (tx_queues_count > 1)
+ stmmac_set_tx_queue_weight(priv);
+
+ /* Configure MTL RX algorithms */
+ if (rx_queues_count > 1)
+ stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
+ priv->plat->rx_sched_algorithm);
+
+ /* Configure MTL TX algorithms */
+ if (tx_queues_count > 1)
+ stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
+ priv->plat->tx_sched_algorithm);
+
+ /* Configure CBS in AVB TX queues */
+ if (tx_queues_count > 1)
+ stmmac_configure_cbs(priv);
+
+ /* Map RX MTL to DMA channels */
+ stmmac_rx_queue_dma_chan_map(priv);
+
+ /* Enable MAC RX Queues */
+ stmmac_mac_enable_rx_queues(priv);
+
+ /* Set RX priorities */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rx_queues_prio(priv);
+
+ /* Set TX priorities */
+ if (tx_queues_count > 1)
+ stmmac_mac_config_tx_queues_prio(priv);
+
+ /* Set RX routing */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rx_queues_routing(priv);
+
+ /* Receive Side Scaling */
+ if (rx_queues_count > 1)
+ stmmac_mac_config_rss(priv);
+}
+
+static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
+{
+ if (priv->dma_cap.asp) {
+ netdev_info(priv->dev, "Enabling Safety Features\n");
+ stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
+ priv->plat->safety_feat_cfg);
+ } else {
+ netdev_info(priv->dev, "No Safety Features support found\n");
+ }
+}
+
+static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
+{
+ char *name;
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+ clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ name = priv->wq_name;
+ sprintf(name, "%s-fpe", priv->dev->name);
+
+ priv->fpe_wq = create_singlethread_workqueue(name);
+ if (!priv->fpe_wq) {
+ netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
+
+ return -ENOMEM;
+ }
+ netdev_info(priv->dev, "FPE workqueue start");
+
+ return 0;
+}
+
+/**
+ * stmmac_hw_setup - setup mac in a usable state.
+ * @dev : pointer to the device structure.
+ * @ptp_register: register PTP if set
+ * Description:
+ * this is the main function to setup the HW in a usable state because the
+ * dma engine is reset, the core registers are configured (e.g. AXI,
+ * Checksum features, timers). The DMA is ready to start receiving and
+ * transmitting.
+ * Return value:
+ * 0 on success and an appropriate negative errno value on failure.
+ */
+static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ /* DMA initialization and SW reset */
+ ret = stmmac_init_dma_engine(priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
+ __func__);
+ return ret;
+ }
+
+ /* Copy the MAC addr into the HW */
+ stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
+
+ /* PS and related bits will be programmed according to the speed */
+ if (priv->hw->pcs) {
+ int speed = priv->plat->mac_port_sel_speed;
+
+ if ((speed == SPEED_10) || (speed == SPEED_100) ||
+ (speed == SPEED_1000)) {
+ priv->hw->ps = speed;
+ } else {
+ dev_warn(priv->device, "invalid port speed\n");
+ priv->hw->ps = 0;
+ }
+ }
+
+ /* Initialize the MAC Core */
+ stmmac_core_init(priv, priv->hw, dev);
+
+ /* Initialize MTL */
+ stmmac_mtl_configuration(priv);
+
+ /* Initialize Safety Features */
+ stmmac_safety_feat_configuration(priv);
+
+ ret = stmmac_rx_ipc(priv, priv->hw);
+ if (!ret) {
+ netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
+ priv->plat->rx_coe = STMMAC_RX_COE_NONE;
+ priv->hw->rx_csum = 0;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Set the HW DMA mode and the COE */
+ stmmac_dma_operation_mode(priv);
+
+ stmmac_mmc_setup(priv);
+
+ if (ptp_register) {
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0)
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ }
+
+ ret = stmmac_init_ptp(priv);
+ if (ret == -EOPNOTSUPP)
+ netdev_info(priv->dev, "PTP not supported by HW\n");
+ else if (ret)
+ netdev_warn(priv->dev, "PTP init failed\n");
+ else if (ptp_register)
+ stmmac_ptp_register(priv);
+
+ priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
+
+ /* Convert the timer from msec to usec */
+ if (!priv->tx_lpi_timer)
+ priv->tx_lpi_timer = eee_timer * 1000;
+
+ if (priv->use_riwt) {
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++) {
+ if (!priv->rx_riwt[queue])
+ priv->rx_riwt[queue] = DEF_DMA_RIWT;
+
+ stmmac_rx_watchdog(priv, priv->ioaddr,
+ priv->rx_riwt[queue], queue);
+ }
+ }
+
+ if (priv->hw->pcs)
+ stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
+
+ /* set TX and RX rings length */
+ stmmac_set_rings_length(priv);
+
+ /* Enable TSO */
+ if (priv->tso) {
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+
+ /* TSO and TBS cannot co-exist */
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ continue;
+
+ stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
+ }
+ }
+
+ /* Enable Split Header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ for (chan = 0; chan < rx_cnt; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+
+ /* VLAN Tag Insertion */
+ if (priv->dma_cap.vlins)
+ stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
+
+ /* TBS */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
+ int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
+
+ stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
+ }
+
+ /* Configure real RX and TX queues */
+ netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
+ netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
+
+ /* Start the ball rolling... */
+ stmmac_start_all_dma(priv);
+
+ if (priv->dma_cap.fpesel) {
+ stmmac_fpe_start_wq(priv);
+
+ if (priv->plat->fpe_cfg->enable)
+ stmmac_fpe_handshake(priv, true);
+ }
+
+ return 0;
+}
+
+static void stmmac_hw_teardown(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+}
+
+static void stmmac_free_irq(struct net_device *dev,
+ enum request_irq_err irq_err, int irq_idx)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int j;
+
+ switch (irq_err) {
+ case REQ_IRQ_ERR_ALL:
+ irq_idx = priv->plat->tx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_TX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->tx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->tx_irq[j], NULL);
+ free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
+ }
+ }
+ irq_idx = priv->plat->rx_queues_to_use;
+ fallthrough;
+ case REQ_IRQ_ERR_RX:
+ for (j = irq_idx - 1; j >= 0; j--) {
+ if (priv->rx_irq[j] > 0) {
+ irq_set_affinity_hint(priv->rx_irq[j], NULL);
+ free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
+ }
+ }
+
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
+ free_irq(priv->sfty_ue_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_UE:
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
+ free_irq(priv->sfty_ce_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_SFTY_CE:
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
+ free_irq(priv->lpi_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_LPI:
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
+ free_irq(priv->wol_irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_WOL:
+ free_irq(dev->irq, dev);
+ fallthrough;
+ case REQ_IRQ_ERR_MAC:
+ case REQ_IRQ_ERR_NO:
+ /* If the MAC IRQ request failed, there are no more IRQs to free */
+ break;
+ }
+}
+
+static int stmmac_request_irq_multi_msi(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
+ cpumask_t cpu_mask;
+ int irq_idx = 0;
+ char *int_name;
+ int ret;
+ int i;
+
+ /* For common interrupt */
+ int_name = priv->int_name_mac;
+ sprintf(int_name, "%s:%s", dev->name, "mac");
+ ret = request_irq(dev->irq, stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc mac MSI %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+ /* Request the Wake IRQ in case a separate line
+ * is used for WoL
+ */
+ priv->wol_irq_disabled = true;
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ int_name = priv->int_name_wol;
+ sprintf(int_name, "%s:%s", dev->name, "wol");
+ ret = request_irq(priv->wol_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc wol MSI %d (error: %d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+ /* Request the LPI IRQ in case a separate line
+ * is used for LPI
+ */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ int_name = priv->int_name_lpi;
+ sprintf(int_name, "%s:%s", dev->name, "lpi");
+ ret = request_irq(priv->lpi_irq,
+ stmmac_mac_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc lpi MSI %d (error: %d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ /* Request the Safety Feature Correctable Error line in
+ * case a separate line is used
+ */
+ if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ce;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ce");
+ ret = request_irq(priv->sfty_ce_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ce MSI %d (error: %d)\n",
+ __func__, priv->sfty_ce_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_CE;
+ goto irq_error;
+ }
+ }
+
+ /* Request the Safety Feature Uncorrectable Error line in
+ * case a separate line is used
+ */
+ if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
+ int_name = priv->int_name_sfty_ue;
+ sprintf(int_name, "%s:%s", dev->name, "safety-ue");
+ ret = request_irq(priv->sfty_ue_irq,
+ stmmac_safety_interrupt,
+ 0, int_name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc sfty ue MSI %d (error: %d)\n",
+ __func__, priv->sfty_ue_irq, ret);
+ irq_err = REQ_IRQ_ERR_SFTY_UE;
+ goto irq_error;
+ }
+ }
+
+ /* Request Rx MSI irq */
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
+ if (i >= MTL_MAX_RX_QUEUES)
+ break;
+ if (priv->rx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_rx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
+ ret = request_irq(priv->rx_irq[i],
+ stmmac_msi_intr_rx,
+ 0, int_name, &priv->dma_conf.rx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc rx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->rx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_RX;
+ irq_idx = i;
+ goto irq_error;
+ }
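+ /* Spread the RX queue interrupts across the online CPUs round-robin */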
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
+ }
+
+ /* Request Tx MSI irq */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
+ if (i >= MTL_MAX_TX_QUEUES)
+ break;
+ if (priv->tx_irq[i] == 0)
+ continue;
+
+ int_name = priv->int_name_tx_irq[i];
+ sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
+ ret = request_irq(priv->tx_irq[i],
+ stmmac_msi_intr_tx,
+ 0, int_name, &priv->dma_conf.tx_queue[i]);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: alloc tx-%d MSI %d (error: %d)\n",
+ __func__, i, priv->tx_irq[i], ret);
+ irq_err = REQ_IRQ_ERR_TX;
+ irq_idx = i;
+ goto irq_error;
+ }
+ cpumask_clear(&cpu_mask);
+ cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
+ irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, irq_idx);
+ return ret;
+}
+
+static int stmmac_request_irq_single(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ enum request_irq_err irq_err;
+ int ret;
+
+ ret = request_irq(dev->irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the IRQ %d (error: %d)\n",
+ __func__, dev->irq, ret);
+ irq_err = REQ_IRQ_ERR_MAC;
+ goto irq_error;
+ }
+
+ /* Request the Wake IRQ in case a separate line
+ * is used for WoL
+ */
+ if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
+ ret = request_irq(priv->wol_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
+ __func__, priv->wol_irq, ret);
+ irq_err = REQ_IRQ_ERR_WOL;
+ goto irq_error;
+ }
+ }
+
+ /* Request the LPI IRQ in case a separate line is used */
+ if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
+ ret = request_irq(priv->lpi_irq, stmmac_interrupt,
+ IRQF_SHARED, dev->name, dev);
+ if (unlikely(ret < 0)) {
+ netdev_err(priv->dev,
+ "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
+ __func__, priv->lpi_irq, ret);
+ irq_err = REQ_IRQ_ERR_LPI;
+ goto irq_error;
+ }
+ }
+
+ return 0;
+
+irq_error:
+ stmmac_free_irq(dev, irq_err, 0);
+ return ret;
+}
+
+static int stmmac_request_irq(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret;
+
+ /* Request the IRQ lines */
+ if (priv->plat->multi_msi_en)
+ ret = stmmac_request_irq_multi_msi(dev);
+ else
+ ret = stmmac_request_irq_single(dev);
+
+ return ret;
+}
+
+/**
+ * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
+ * @priv: driver private structure
+ * @mtu: MTU to setup the dma queue and buf with
+ * Description: Allocate and generate a dma_conf based on the provided MTU.
+ * Allocate the Tx/Rx DMA queues and initialize them.
+ * Return value:
+ * the allocated dma_conf struct on success and an appropriate ERR_PTR on failure.
+ */
+static struct stmmac_dma_conf *
+stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
+{
+ struct stmmac_dma_conf *dma_conf;
+ int chan, bfsize, ret;
+
+ dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
+ if (!dma_conf) {
+ netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
+ __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
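+ /* Pick the RX buffer size for this MTU: use the 16KiB buffer
+ * layout when the hardware supports it, otherwise derive a
+ * standard size from the MTU.
+ */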
+ bfsize = stmmac_set_16kib_bfsize(priv, mtu);
+ if (bfsize < 0)
+ bfsize = 0;
+
+ if (bfsize < BUF_SIZE_16KiB)
+ bfsize = stmmac_set_bfsize(mtu, 0);
+
+ dma_conf->dma_buf_sz = bfsize;
+ /* Choose the Tx/Rx ring sizes from the ones already defined in the
+ * priv struct, if any.
+ */
+ dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
+ dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
+
+ if (!dma_conf->dma_tx_size)
+ dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
+ if (!dma_conf->dma_rx_size)
+ dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
+
+ /* Earlier check for TBS */
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
+ struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
+ int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
+
+ /* Setup per-TXQ tbs flag before TX descriptor alloc */
+ tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
+ }
+
+ ret = alloc_dma_desc_resources(priv, dma_conf);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto alloc_error;
+ }
+
+ ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ return dma_conf;
+
+init_error:
+ free_dma_desc_resources(priv, dma_conf);
+alloc_error:
+ kfree(dma_conf);
+ return ERR_PTR(ret);
+}
+
+/**
+ * __stmmac_open - open entry point of the driver
+ * @dev : pointer to the device structure.
+ * @dma_conf : structure holding the DMA configuration and queues
+ * Description:
+ * This function is the open entry point of the driver.
+ * Return value:
+ * 0 on success and a negative errno code on failure.
+ */
+static int __stmmac_open(struct net_device *dev,
+ struct stmmac_dma_conf *dma_conf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int mode = priv->plat->phy_interface;
+ u32 chan;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI &&
+ (!priv->hw->xpcs ||
+ xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) {
+ ret = stmmac_init_phy(dev);
+ if (ret) {
+ netdev_err(priv->dev,
+ "%s: Cannot attach to PHY (error: %d)\n",
+ __func__, ret);
+ goto init_phy_error;
+ }
+ }
+
+ /* Extra statistics */
+ memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
+ priv->xstats.threshold = tc;
+
+ priv->rx_copybreak = STMMAC_RX_COPYBREAK;
+
+ buf_sz = dma_conf->dma_buf_sz;
+ memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
+
+ stmmac_reset_queues_param(priv);
+
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Serdes powerup failed\n",
+ __func__);
+ goto init_error;
+ }
+ }
+
+ ret = stmmac_hw_setup(dev, true);
+ if (ret < 0) {
+ netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
+ goto init_error;
+ }
+
+ stmmac_init_coalesce(priv);
+
+ phylink_start(priv->phylink);
+ /* We may have called phylink_speed_down before */
+ phylink_speed_up(priv->phylink);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ stmmac_enable_all_queues(priv);
+ netif_tx_start_all_queues(priv->dev);
+ stmmac_enable_all_dma_irq(priv);
+
+ return 0;
+
+irq_error:
+ phylink_stop(priv->phylink);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ phylink_disconnect_phy(priv->phylink);
+init_phy_error:
+ pm_runtime_put(priv->device);
+ return ret;
+}
+
+static int stmmac_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_dma_conf *dma_conf;
+ int ret;
+
+ dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
+ if (IS_ERR(dma_conf))
+ return PTR_ERR(dma_conf);
+
+ ret = __stmmac_open(dev, dma_conf);
+ if (ret)
+ free_dma_desc_resources(priv, dma_conf);
+
+ kfree(dma_conf);
+ return ret;
+}
+
+static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
+{
+ set_bit(__FPE_REMOVING, &priv->fpe_task_state);
+
+ if (priv->fpe_wq)
+ destroy_workqueue(priv->fpe_wq);
+
+ netdev_info(priv->dev, "FPE workqueue stop");
+}
+
+/**
+ * stmmac_release - close entry point of the driver
+ * @dev : device pointer.
+ * Description:
+ * This is the close entry point of the driver.
+ */
+static int stmmac_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ /* Stop and disconnect the PHY */
+ phylink_stop(priv->phylink);
+ phylink_disconnect_phy(priv->phylink);
+
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ netif_tx_disable(dev);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
+ /* Stop TX/RX DMA and clear the descriptors */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv, &priv->dma_conf);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* Powerdown Serdes if there is */
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
+
+ netif_carrier_off(dev);
+
+ stmmac_release_ptp(priv);
+
+ pm_runtime_put(priv->device);
+
+ if (priv->dma_cap.fpesel)
+ stmmac_fpe_stop_wq(priv);
+
+ return 0;
+}
+
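+/* Push the VLAN tag(s) into a context descriptor when the HW supports
+ * in-descriptor VLAN insertion (dma_cap.vlins). Consumes one TX
+ * descriptor and returns true, so the caller can then flag VLAN
+ * insertion on the first data descriptor of the frame.
+ */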
+static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
+ struct stmmac_tx_queue *tx_q)
+{
+ u16 tag = 0x0, inner_tag = 0x0;
+ u32 inner_type = 0x0;
+ struct dma_desc *p;
+
+ if (!priv->dma_cap.vlins)
+ return false;
+ if (!skb_vlan_tag_present(skb))
+ return false;
+ if (skb->vlan_proto == htons(ETH_P_8021AD)) {
+ inner_tag = skb_vlan_tag_get(skb);
+ inner_type = STMMAC_VLAN_INSERT;
+ }
+
+ tag = skb_vlan_tag_get(skb);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ p = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ p = &tx_q->dma_tx[tx_q->cur_tx];
+
+ if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
+ return false;
+
+ stmmac_set_tx_owner(priv, p);
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+ return true;
+}
+
+/**
+ * stmmac_tso_allocator - allocate TSO payload descriptors
+ * @priv: driver private structure
+ * @des: buffer start address
+ * @total_len: total length to fill in descriptors
+ * @last_segment: condition for the last descriptor
+ * @queue: TX queue index
+ * Description:
+ * This function fills descriptors and requests new ones according to the
+ * buffer length to fill.
+ */
+static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
+ int total_len, bool last_segment, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct dma_desc *desc;
+ u32 buff_size;
+ int tmp_len;
+
+ tmp_len = total_len;
+
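+ /* Split the payload into TSO_MAX_BUFF_SIZE chunks, one descriptor
+ * per chunk; only the final chunk may be flagged as last segment.
+ */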
+ while (tmp_len > 0) {
+ dma_addr_t curr_addr;
+
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_conf.dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ curr_addr = des + (total_len - tmp_len);
+ if (priv->dma_cap.addr64 <= 32)
+ desc->des0 = cpu_to_le32(curr_addr);
+ else
+ stmmac_set_desc_addr(priv, desc, curr_addr);
+
+ buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
+ TSO_MAX_BUFF_SIZE : tmp_len;
+
+ stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
+ 0, 1,
+ (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
+ 0, 0);
+
+ tmp_len -= TSO_MAX_BUFF_SIZE;
+ }
+}
+
+static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ int desc_size;
+
+ if (likely(priv->extend_desc))
+ desc_size = sizeof(struct dma_extended_desc);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc_size = sizeof(struct dma_edesc);
+ else
+ desc_size = sizeof(struct dma_desc);
+
+ /* The own bit must be the latest setting done when prepare the
+ * descriptor and then barrier is needed to make sure that
+ * all is coherent before granting the DMA engine.
+ */
+ wmb();
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
+}
+
+/**
+ * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description: this is the transmit function that is called on TSO frames
+ * (support available on GMAC4 and newer chips).
+ * The diagram below shows the ring programming in case of TSO frames:
+ *
+ * First Descriptor
+ * --------
+ * | DES0 |---> buffer1 = L2/L3/L4 header
+ * | DES1 |---> TCP Payload (can continue on next descr...)
+ * | DES2 |---> buffer 1 and 2 len
+ * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
+ * --------
+ * |
+ * ...
+ * |
+ * --------
+ * | DES0 | --| Split TCP Payload on Buffers 1 and 2
+ * | DES1 | --|
+ * | DES2 | --> buffer 1 and 2 len
+ * | DES3 |
+ * --------
+ *
+ * The MSS is fixed while TSO is enabled, so the TDES3 context field does
+ * not need to be reprogrammed for every frame.
+ */
+static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct dma_desc *desc, *first, *mss_desc = NULL;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ u32 queue = skb_get_queue_mapping(skb);
+ unsigned int first_entry, tx_packets;
+ int tmp_pay_len = 0, first_tx;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ u8 proto_hdr_len, hdr;
+ u32 pay_len, mss;
+ dma_addr_t des;
+ int i;
+
+ tx_q = &priv->dma_conf.tx_queue[queue];
+ first_tx = tx_q->cur_tx;
+
+ /* Compute header lengths */
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+ proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
+ hdr = sizeof(struct udphdr);
+ } else {
+ proto_hdr_len = skb_tcp_all_headers(skb);
+ hdr = tcp_hdrlen(skb);
+ }
+
+ /* Desc availability based on threshold should be safe enough */
+ if (unlikely(stmmac_tx_avail(priv, queue) <
+ (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
+
+ mss = skb_shinfo(skb)->gso_size;
+
+ /* set new MSS value if needed */
+ if (mss != tx_q->mss) {
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ stmmac_set_mss(priv, mss_desc, mss);
+ tx_q->mss = mss;
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
+ priv->dma_conf.dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
+ }
+
+ if (netif_msg_tx_queued(priv)) {
+ pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
+ __func__, hdr, proto_hdr_len, pay_len, mss);
+ pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
+ skb->data_len);
+ }
+
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
+ first_entry = tx_q->cur_tx;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[first_entry].basic;
+ else
+ desc = &tx_q->dma_tx[first_entry];
+ first = desc;
+
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
+ /* first descriptor: fill Headers on Buf1 */
+ des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ if (priv->dma_cap.addr64 <= 32) {
+ first->des0 = cpu_to_le32(des);
+
+ /* Fill start of payload in buff2 of first descriptor */
+ if (pay_len)
+ first->des1 = cpu_to_le32(des + proto_hdr_len);
+
+ /* If needed take extra descriptors to fill the remaining payload */
+ tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
+ } else {
+ stmmac_set_desc_addr(priv, first, des);
+ tmp_pay_len = pay_len;
+ des += proto_hdr_len;
+ pay_len = 0;
+ }
+
+ stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
+
+ /* Prepare fragments */
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+ des = skb_frag_dma_map(priv->device, frag, 0,
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ stmmac_tso_allocator(priv, des, skb_frag_size(frag),
+ (i == nfrags - 1), queue);
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+ }
+
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
+
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[tx_q->cur_tx] = skb;
+ tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* Manage tx mitigation */
+ tx_packets = (tx_q->cur_tx + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
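+ /* Set the Interrupt-on-Completion bit when the frame is being
+ * HW-timestamped, or roughly once every tx_coal_frames packets;
+ * when frame coalescing is disabled, cleanup relies on the TX
+ * timer alone.
+ */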
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames[queue])
+ set_ic = true;
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
+ else
+ desc = &tx_q->dma_tx[tx_q->cur_tx];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ /* We've used all descriptors we need for this skb, however,
+ * advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill this descriptor the next time it's
+ * called and stmmac_tx_clean may clean up to this descriptor.
+ */
+ tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
+
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ dev->stats.tx_bytes += skb->len;
+ priv->xstats.tx_tso_frames++;
+ priv->xstats.tx_tso_nfrags += nfrags;
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
+ skb_tx_timestamp(skb);
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first);
+ }
+
+ /* Complete the first descriptor before granting the DMA */
+ stmmac_prepare_tso_tx_desc(priv, first, 1,
+ proto_hdr_len,
+ pay_len,
+ 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
+ hdr / 4, (skb->len - proto_hdr_len));
+
+ /* If context desc is used to change MSS */
+ if (mss_desc) {
+ /* Make sure that first descriptor has been completely
+ * written, including its own bit. This is because MSS is
+ * actually before first descriptor, so we need to make
+ * sure that MSS's own bit is the last thing written.
+ */
+ dma_wmb();
+ stmmac_set_tx_owner(priv, mss_desc);
+ }
+
+ if (netif_msg_pktdata(priv)) {
+ pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ tx_q->cur_tx, first, nfrags);
+ pr_info(">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb_headlen(skb));
+ }
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ dev_err(priv->device, "Tx dma map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+/**
+ * stmmac_xmit - Tx entry point of the driver
+ * @skb : the socket buffer
+ * @dev : device pointer
+ * Description : this is the tx entry point of the driver.
+ * It programs the chain or the ring and supports oversized frames
+ * and SG feature.
+ */
+static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ unsigned int first_entry, tx_packets, enh_desc;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ unsigned int nopaged_len = skb_headlen(skb);
+ int i, csum_insertion = 0, is_jumbo = 0;
+ u32 queue = skb_get_queue_mapping(skb);
+ int nfrags = skb_shinfo(skb)->nr_frags;
+ int gso = skb_shinfo(skb)->gso_type;
+ struct dma_edesc *tbs_desc = NULL;
+ struct dma_desc *desc, *first;
+ struct stmmac_tx_queue *tx_q;
+ bool has_vlan, set_ic;
+ int entry, first_tx;
+ dma_addr_t des;
+
+ tx_q = &priv->dma_conf.tx_queue[queue];
+ first_tx = tx_q->cur_tx;
+
+ if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
+ stmmac_disable_eee_mode(priv);
+
+ /* Manage oversized TCP frames for GMAC4 device */
+ if (skb_is_gso(skb) && priv->tso) {
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ return stmmac_tso_xmit(skb, dev);
+ if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
+ return stmmac_tso_xmit(skb, dev);
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
+ if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
+ queue));
+ /* This is a hard error, log it. */
+ netdev_err(priv->dev,
+ "%s: Tx Ring full when queue awake\n",
+ __func__);
+ }
+ return NETDEV_TX_BUSY;
+ }
+
+ /* Check if VLAN can be inserted by HW */
+ has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
+
+ entry = tx_q->cur_tx;
+ first_entry = entry;
+ WARN_ON(tx_q->tx_skbuff[first_entry]);
+
+ csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = tx_q->dma_tx + entry;
+
+ first = desc;
+
+ if (has_vlan)
+ stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
+
+ enh_desc = priv->plat->enh_desc;
+ /* Program the descriptors according to the size of the frame */
+ if (enh_desc)
+ is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
+
+ if (unlikely(is_jumbo)) {
+ entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
+ if (unlikely(entry < 0) && (entry != -EINVAL))
+ goto dma_map_err;
+ }
+
+ for (i = 0; i < nfrags; i++) {
+ const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ int len = skb_frag_size(frag);
+ bool last_segment = (i == (nfrags - 1));
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ WARN_ON(tx_q->tx_skbuff[entry]);
+
+ if (likely(priv->extend_desc))
+ desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = tx_q->dma_tx + entry;
+
+ des = skb_frag_dma_map(priv->device, frag, 0, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err; /* should reuse desc w/o issues */
+
+ tx_q->tx_skbuff_dma[entry].buf = des;
+
+ stmmac_set_desc_addr(priv, desc, des);
+
+ tx_q->tx_skbuff_dma[entry].map_as_page = true;
+ tx_q->tx_skbuff_dma[entry].len = len;
+ tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* Prepare the descriptor and set the own bit too */
+ stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
+ priv->mode, 1, last_segment, skb->len);
+ }
+
+ /* Only the last descriptor gets to point to the skb. */
+ tx_q->tx_skbuff[entry] = skb;
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
+
+ /* According to the coalesce parameter the IC bit for the latest
+ * segment is reset and the timer re-started to clean the tx status.
+ * This approach takes care of the fragments: desc is the first
+ * element in case of no SG.
+ */
+ tx_packets = (entry + 1) - first_tx;
+ tx_q->tx_count_frames += tx_packets;
+
+ if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
+ set_ic = true;
+ else if (!priv->tx_coal_frames[queue])
+ set_ic = false;
+ else if (tx_packets > priv->tx_coal_frames[queue])
+ set_ic = true;
+ else if ((tx_q->tx_count_frames %
+ priv->tx_coal_frames[queue]) < tx_packets)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ if (likely(priv->extend_desc))
+ desc = &tx_q->dma_etx[entry].basic;
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ desc = &tx_q->dma_entx[entry].basic;
+ else
+ desc = &tx_q->dma_tx[entry];
+
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ /* We've used all descriptors we need for this skb, however,
+ * advance cur_tx so that it references a fresh descriptor.
+ * ndo_start_xmit will fill this descriptor the next time it's
+ * called and stmmac_tx_clean may clean up to this descriptor.
+ */
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ if (netif_msg_pktdata(priv)) {
+ netdev_dbg(priv->dev,
+ "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
+ __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
+ entry, first, nfrags);
+
+ netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
+ print_pkt(skb->data, skb->len);
+ }
+
+ if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
+ netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
+ __func__);
+ netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
+ }
+
+ dev->stats.tx_bytes += skb->len;
+
+ if (priv->sarc_type)
+ stmmac_set_desc_sarc(priv, first, priv->sarc_type);
+
+ skb_tx_timestamp(skb);
+
+ /* Ready to fill the first descriptor and set the OWN bit w/o any
+ * problems because all the descriptors are actually ready to be
+ * passed to the DMA engine.
+ */
+ if (likely(!is_jumbo)) {
+ bool last_segment = (nfrags == 0);
+
+ des = dma_map_single(priv->device, skb->data,
+ nopaged_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, des))
+ goto dma_map_err;
+
+ tx_q->tx_skbuff_dma[first_entry].buf = des;
+ tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
+ tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
+
+ stmmac_set_desc_addr(priv, first, des);
+
+ tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
+ tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
+
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+ priv->hwts_tx_en)) {
+ /* declare that device is doing timestamping */
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ stmmac_enable_tx_timestamp(priv, first);
+ }
+
+ /* Prepare the first descriptor setting the OWN bit too */
+ stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
+ csum_insertion, priv->mode, 0, last_segment,
+ skb->len);
+ }
+
+ if (tx_q->tbs & STMMAC_TBS_EN) {
+ struct timespec64 ts = ns_to_timespec64(skb->tstamp);
+
+ tbs_desc = &tx_q->dma_entx[first_entry];
+ stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
+ }
+
+ stmmac_set_tx_owner(priv, first);
+
+ netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+
+ return NETDEV_TX_OK;
+
+dma_map_err:
+ netdev_err(priv->dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ priv->dev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+}
+
+static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
+{
+ struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
+ __be16 vlan_proto = veth->h_vlan_proto;
+ u16 vlanid;
+
+ if ((vlan_proto == htons(ETH_P_8021Q) &&
+ dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
+ (vlan_proto == htons(ETH_P_8021AD) &&
+ dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
+ /* pop the vlan tag */
+ vlanid = ntohs(veth->h_vlan_TCI);
+ memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
+ skb_pull(skb, VLAN_HLEN);
+ __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
+ }
+}
+
+/**
+ * stmmac_rx_refill - refill the used preallocated RX buffers
+ * @priv: driver private structure
+ * @queue: RX queue index
+ * Description: this reallocates the RX buffers consumed by the
+ * zero-copy reception process.
+ */
+static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int entry = rx_q->dirty_rx;
+ gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
+
+ if (priv->dma_cap.host_dma_width <= 32)
+ gfp |= GFP_DMA32;
+
+ while (dirty-- > 0) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ struct dma_desc *p;
+ bool use_rx_wd;
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ if (!buf->page) {
+ buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->page)
+ break;
+ }
+
+ if (priv->sph && !buf->sec_page) {
+ buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
+ if (!buf->sec_page)
+ break;
+
+ buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+ }
+
+ buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
+
+ stmmac_set_desc_addr(priv, p, buf->addr);
+ if (priv->sph)
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+ else
+ stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+ stmmac_refill_desc3(priv, rx_q, p);
+
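+ /* RX interrupt mitigation: with the RIWT watchdog enabled, request
+ * a completion interrupt only when the frame counter wraps past
+ * rx_coal_frames and let the watchdog cover the rest.
+ */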
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, p, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+ }
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+}
+
+static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ unsigned int plen = 0, hlen = 0;
+ int coe = priv->hw->rx_csum;
+
+ /* Not first descriptor, buffer is always zero */
+ if (priv->sph && len)
+ return 0;
+
+ /* First descriptor, get split header length */
+ stmmac_get_rx_header_len(priv, p, &hlen);
+ if (priv->sph && hlen) {
+ priv->xstats.rx_split_hdr_pkt_n++;
+ return hlen;
+ }
+
+ /* First descriptor, not last descriptor and not split header */
+ if (status & rx_not_ls)
+ return priv->dma_conf.dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* First descriptor and last descriptor and not split header */
+ return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
+}
+
+static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
+ struct dma_desc *p,
+ int status, unsigned int len)
+{
+ int coe = priv->hw->rx_csum;
+ unsigned int plen = 0;
+
+ /* Not split header, buffer is not available */
+ if (!priv->sph)
+ return 0;
+
+ /* Not last descriptor */
+ if (status & rx_not_ls)
+ return priv->dma_conf.dma_buf_sz;
+
+ plen = stmmac_get_rx_frame_len(priv, p, coe);
+
+ /* Last descriptor */
+ return plen - len;
+}
+
+static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
+ struct xdp_frame *xdpf, bool dma_map)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ unsigned int entry = tx_q->cur_tx;
+ struct dma_desc *tx_desc;
+ dma_addr_t dma_addr;
+ bool set_ic;
+
+ if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
+ return STMMAC_XDP_CONSUMED;
+
+ if (likely(priv->extend_desc))
+ tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
+ else if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ tx_desc = &tx_q->dma_entx[entry].basic;
+ else
+ tx_desc = tx_q->dma_tx + entry;
+
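+ /* dma_map is true on the ndo_xdp_xmit() path, where the frame
+ * comes from elsewhere and must be mapped; on the XDP_TX path the
+ * page already belongs to our page_pool and only needs a sync.
+ */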
+ if (dma_map) {
+ dma_addr = dma_map_single(priv->device, xdpf->data,
+ xdpf->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->device, dma_addr))
+ return STMMAC_XDP_CONSUMED;
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
+ } else {
+ struct page *page = virt_to_page(xdpf->data);
+
+ dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
+ xdpf->headroom;
+ dma_sync_single_for_device(priv->device, dma_addr,
+ xdpf->len, DMA_BIDIRECTIONAL);
+
+ tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
+ }
+
+ tx_q->tx_skbuff_dma[entry].buf = dma_addr;
+ tx_q->tx_skbuff_dma[entry].map_as_page = false;
+ tx_q->tx_skbuff_dma[entry].len = xdpf->len;
+ tx_q->tx_skbuff_dma[entry].last_segment = true;
+ tx_q->tx_skbuff_dma[entry].is_jumbo = false;
+
+ tx_q->xdpf[entry] = xdpf;
+
+ stmmac_set_desc_addr(priv, tx_desc, dma_addr);
+
+ stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
+ true, priv->mode, true, true,
+ xdpf->len);
+
+ tx_q->tx_count_frames++;
+
+ if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
+ set_ic = true;
+ else
+ set_ic = false;
+
+ if (set_ic) {
+ tx_q->tx_count_frames = 0;
+ stmmac_set_tx_ic(priv, tx_desc);
+ priv->xstats.tx_set_ic_bit++;
+ }
+
+ stmmac_enable_dma_transmission(priv, priv->ioaddr);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
+ tx_q->cur_tx = entry;
+
+ return STMMAC_XDP_TX;
+}
+
+static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
+ int cpu)
+{
+ int index = cpu;
+
+ if (unlikely(index < 0))
+ index = 0;
+
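+ /* Fold the CPU id onto the available TX queues (cheap modulo) */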
+ while (index >= priv->plat->tx_queues_to_use)
+ index -= priv->plat->tx_queues_to_use;
+
+ return index;
+}
+
+static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int queue;
+ int res;
+
+ if (unlikely(!xdpf))
+ return STMMAC_XDP_CONSUMED;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
+ if (res == STMMAC_XDP_TX)
+ stmmac_flush_tx_descriptors(priv, queue);
+
+ __netif_tx_unlock(nq);
+
+ return res;
+}
+
+static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct bpf_prog *prog,
+ struct xdp_buff *xdp)
+{
+ u32 act;
+ int res;
+
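+ /* Run the XDP program and map its verdict onto the driver-level
+ * STMMAC_XDP_* result flags consumed by the RX path.
+ */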
+ act = bpf_prog_run_xdp(prog, xdp);
+ switch (act) {
+ case XDP_PASS:
+ res = STMMAC_XDP_PASS;
+ break;
+ case XDP_TX:
+ res = stmmac_xdp_xmit_back(priv, xdp);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
+ res = STMMAC_XDP_CONSUMED;
+ else
+ res = STMMAC_XDP_REDIRECT;
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(priv->dev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(priv->dev, prog, act);
+ fallthrough;
+ case XDP_DROP:
+ res = STMMAC_XDP_CONSUMED;
+ break;
+ }
+
+ return res;
+}
+
+static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
+ struct xdp_buff *xdp)
+{
+ struct bpf_prog *prog;
+ int res;
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog) {
+ res = STMMAC_XDP_PASS;
+ goto out;
+ }
+
+ res = __stmmac_xdp_run_prog(priv, prog, xdp);
+out:
+ return ERR_PTR(-res);
+}
+
+static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
+ int xdp_status)
+{
+ int cpu = smp_processor_id();
+ int queue;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+
+ if (xdp_status & STMMAC_XDP_TX)
+ stmmac_tx_timer_arm(priv, queue);
+
+ if (xdp_status & STMMAC_XDP_REDIRECT)
+ xdp_do_flush();
+}
+
+static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
+ struct xdp_buff *xdp)
+{
+ unsigned int metasize = xdp->data - xdp->data_meta;
+ unsigned int datasize = xdp->data_end - xdp->data;
+ struct sk_buff *skb;
+
+ skb = __napi_alloc_skb(&ch->rxtx_napi,
+ xdp->data_end - xdp->data_hard_start,
+ GFP_ATOMIC | __GFP_NOWARN);
+ if (unlikely(!skb))
+ return NULL;
+
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
+ memcpy(__skb_put(skb, datasize), xdp->data, datasize);
+ if (metasize)
+ skb_metadata_set(skb, metasize);
+
+ return skb;
+}
+
+static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
+ struct dma_desc *p, struct dma_desc *np,
+ struct xdp_buff *xdp)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int len = xdp->data_end - xdp->data;
+ enum pkt_hash_types hash_type;
+ int coe = priv->hw->rx_csum;
+ struct sk_buff *skb;
+ u32 hash;
+
+ skb = stmmac_construct_skb_zc(ch, xdp);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ return;
+ }
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rxtx_napi, skb);
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+}
+
+static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ unsigned int entry = rx_q->dirty_rx;
+ struct dma_desc *rx_desc = NULL;
+ bool ret = true;
+
+ budget = min(budget, stmmac_rx_dirty(priv, queue));
+
+ while (budget-- > 0 && entry != rx_q->cur_rx) {
+ struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
+ dma_addr_t dma_addr;
+ bool use_rx_wd;
+
+ if (!buf->xdp) {
+ buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
+ if (!buf->xdp) {
+ ret = false;
+ break;
+ }
+ }
+
+ if (priv->extend_desc)
+ rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ rx_desc = rx_q->dma_rx + entry;
+
+ dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
+ stmmac_set_desc_addr(priv, rx_desc, dma_addr);
+ stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
+ stmmac_refill_desc3(priv, rx_q, rx_desc);
+
+ rx_q->rx_count_frames++;
+ rx_q->rx_count_frames += priv->rx_coal_frames[queue];
+ if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
+ rx_q->rx_count_frames = 0;
+
+ use_rx_wd = !priv->rx_coal_frames[queue];
+ use_rx_wd |= rx_q->rx_count_frames > 0;
+ if (!priv->use_riwt)
+ use_rx_wd = false;
+
+ dma_wmb();
+ stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
+
+ entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
+ }
+
+ if (rx_desc) {
+ rx_q->dirty_rx = entry;
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->dirty_rx * sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
+ }
+
+ return ret;
+}
+
+static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int dirty = stmmac_rx_dirty(priv, queue);
+ unsigned int next_entry = rx_q->cur_rx;
+ unsigned int desc_size;
+ struct bpf_prog *prog;
+ bool failure = false;
+ int xdp_status = 0;
+ int status = 0;
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ struct stmmac_rx_buffer *buf;
+ unsigned int buf1_len = 0;
+ struct dma_desc *np, *p;
+ int entry;
+ int res;
+
+ if (!count && rx_q->state_saved) {
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ error = 0;
+ len = 0;
+ }
+
+ if (count >= limit)
+ break;
+
+read_again:
+ buf1_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (dirty >= STMMAC_RX_FILL_BATCH) {
+ failure = failure ||
+ !stmmac_rx_refill_zc(priv, queue, dirty);
+ dirty = 0;
+ }
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ /* Prefetch the next RX descriptor */
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_conf.dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+ /* Ensure a valid XSK buffer before proceeding */
+ if (!buf->xdp)
+ break;
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats,
+ rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ count++;
+ continue;
+ }
+
+ /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
+ if (likely(status & rx_not_ls)) {
+ xsk_buff_free(buf->xdp);
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ goto read_again;
+ }
+
+ /* XDP ZC frames only support primary buffers for now */
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+
+ /* RX buffer is good and fits into an XSK pool buffer */
+ buf->xdp->data_end = buf->xdp->data + buf1_len;
+ xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
+
+ prog = READ_ONCE(priv->xdp_prog);
+ res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
+
+ switch (res) {
+ case STMMAC_XDP_PASS:
+ stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
+ xsk_buff_free(buf->xdp);
+ break;
+ case STMMAC_XDP_CONSUMED:
+ xsk_buff_free(buf->xdp);
+ priv->dev->stats.rx_dropped++;
+ break;
+ case STMMAC_XDP_TX:
+ case STMMAC_XDP_REDIRECT:
+ xdp_status |= res;
+ break;
+ }
+
+ buf->xdp = NULL;
+ dirty++;
+ count++;
+ }
+
+ if (status & rx_not_ls) {
+ rx_q->state_saved = true;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
+ if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
+ if (failure || stmmac_rx_dirty(priv, queue) > 0)
+ xsk_set_rx_need_wakeup(rx_q->xsk_pool);
+ else
+ xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
+
+ return (int)count;
+ }
+
+ return failure ? limit : (int)count;
+}
+
+/**
+ * stmmac_rx - manage the receive process
+ * @priv: driver private structure
+ * @limit: napi budget
+ * @queue: RX queue index.
+ * Description : this is the function called by the napi poll method.
+ * It gets all the frames inside the ring.
+ */
+static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned int count = 0, error = 0, len = 0;
+ int status = 0, coe = priv->hw->rx_csum;
+ unsigned int next_entry = rx_q->cur_rx;
+ enum dma_data_direction dma_dir;
+ unsigned int desc_size;
+ struct sk_buff *skb = NULL;
+ struct xdp_buff xdp;
+ int xdp_status = 0;
+ int buf_sz;
+
+ dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
+ buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
+ limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
+
+ if (netif_msg_rx_status(priv)) {
+ void *rx_head;
+
+ netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
+ if (priv->extend_desc) {
+ rx_head = (void *)rx_q->dma_erx;
+ desc_size = sizeof(struct dma_extended_desc);
+ } else {
+ rx_head = (void *)rx_q->dma_rx;
+ desc_size = sizeof(struct dma_desc);
+ }
+
+ stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
+ rx_q->dma_rx_phy, desc_size);
+ }
+ while (count < limit) {
+ unsigned int buf1_len = 0, buf2_len = 0;
+ enum pkt_hash_types hash_type;
+ struct stmmac_rx_buffer *buf;
+ struct dma_desc *np, *p;
+ int entry;
+ u32 hash;
+
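+ /* A frame can span several descriptors and therefore several
+ * NAPI polls; restore any state saved when the previous poll
+ * ran out of budget mid-frame.
+ */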
+ if (!count && rx_q->state_saved) {
+ skb = rx_q->state.skb;
+ error = rx_q->state.error;
+ len = rx_q->state.len;
+ } else {
+ rx_q->state_saved = false;
+ skb = NULL;
+ error = 0;
+ len = 0;
+ }
+
+read_again:
+ if (count >= limit)
+ break;
+
+ buf1_len = 0;
+ buf2_len = 0;
+ entry = next_entry;
+ buf = &rx_q->buf_pool[entry];
+
+ if (priv->extend_desc)
+ p = (struct dma_desc *)(rx_q->dma_erx + entry);
+ else
+ p = rx_q->dma_rx + entry;
+
+ /* read the status of the incoming frame */
+ status = stmmac_rx_status(priv, &priv->dev->stats,
+ &priv->xstats, p);
+ /* check if managed by the DMA otherwise go ahead */
+ if (unlikely(status & dma_own))
+ break;
+
+ rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
+ priv->dma_conf.dma_rx_size);
+ next_entry = rx_q->cur_rx;
+
+ if (priv->extend_desc)
+ np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
+ else
+ np = rx_q->dma_rx + next_entry;
+
+ prefetch(np);
+
+ if (priv->extend_desc)
+ stmmac_rx_extended_status(priv, &priv->dev->stats,
+ &priv->xstats, rx_q->dma_erx + entry);
+ if (unlikely(status == discard_frame)) {
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ error = 1;
+ if (!priv->hwts_rx_en)
+ priv->dev->stats.rx_errors++;
+ }
+
+ if (unlikely(error && (status & rx_not_ls)))
+ goto read_again;
+ if (unlikely(error)) {
+ dev_kfree_skb(skb);
+ skb = NULL;
+ count++;
+ continue;
+ }
+
+ /* Buffer is good. Go on. */
+
+ prefetch(page_address(buf->page) + buf->page_offset);
+ if (buf->sec_page)
+ prefetch(page_address(buf->sec_page));
+
+ buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
+ len += buf1_len;
+ buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
+ len += buf2_len;
+
+ /* ACS is disabled; strip manually. */
+ if (likely(!(status & rx_not_ls))) {
+ if (buf2_len) {
+ buf2_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ } else if (buf1_len) {
+ buf1_len -= ETH_FCS_LEN;
+ len -= ETH_FCS_LEN;
+ }
+ }
+
+ if (!skb) {
+ unsigned int pre_len, sync_len;
+
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+
+ xdp_init_buff(&xdp, buf_sz, &rx_q->xdp_rxq);
+ xdp_prepare_buff(&xdp, page_address(buf->page),
+ buf->page_offset, buf1_len, false);
+
+ pre_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ skb = stmmac_xdp_run_prog(priv, &xdp);
+ /* Due to xdp_adjust_tail: the for_device DMA sync must
+ * cover the maximum length the CPU touched
+ */
+ sync_len = xdp.data_end - xdp.data_hard_start -
+ buf->page_offset;
+ sync_len = max(sync_len, pre_len);
+
+ /* For any verdict other than XDP_PASS */
+ if (IS_ERR(skb)) {
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & STMMAC_XDP_CONSUMED) {
+ page_pool_put_page(rx_q->page_pool,
+ virt_to_head_page(xdp.data),
+ sync_len, true);
+ buf->page = NULL;
+ priv->dev->stats.rx_dropped++;
+
+ /* Clear skb, which carried the XDP
+ * verdict as an ERR_PTR.
+ */
+ skb = NULL;
+
+ if (unlikely((status & rx_not_ls)))
+ goto read_again;
+
+ count++;
+ continue;
+ } else if (xdp_res & (STMMAC_XDP_TX |
+ STMMAC_XDP_REDIRECT)) {
+ xdp_status |= xdp_res;
+ buf->page = NULL;
+ skb = NULL;
+ count++;
+ continue;
+ }
+ }
+ }
+
+ if (!skb) {
+ /* XDP program may expand or reduce tail */
+ buf1_len = xdp.data_end - xdp.data;
+
+ skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
+ if (!skb) {
+ priv->dev->stats.rx_dropped++;
+ count++;
+ goto drain_data;
+ }
+
+ /* XDP program may adjust header */
+ skb_copy_to_linear_data(skb, xdp.data, buf1_len);
+ skb_put(skb, buf1_len);
+
+ /* Data payload copied into SKB, page ready for recycle */
+ page_pool_recycle_direct(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ } else if (buf1_len) {
+ dma_sync_single_for_cpu(priv->device, buf->addr,
+ buf1_len, dma_dir);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->page, buf->page_offset, buf1_len,
+ priv->dma_conf.dma_buf_sz);
+
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->page);
+ buf->page = NULL;
+ }
+
+ if (buf2_len) {
+ dma_sync_single_for_cpu(priv->device, buf->sec_addr,
+ buf2_len, dma_dir);
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+ buf->sec_page, 0, buf2_len,
+ priv->dma_conf.dma_buf_sz);
+
+ /* Data payload appended into SKB */
+ page_pool_release_page(rx_q->page_pool, buf->sec_page);
+ buf->sec_page = NULL;
+ }
+
+drain_data:
+ if (likely(status & rx_not_ls))
+ goto read_again;
+ if (!skb)
+ continue;
+
+ /* Got entire packet into SKB. Finish it. */
+
+ stmmac_get_rx_hwtstamp(priv, p, np, skb);
+ stmmac_rx_vlan(priv->dev, skb);
+ skb->protocol = eth_type_trans(skb, priv->dev);
+
+ if (unlikely(!coe))
+ skb_checksum_none_assert(skb);
+ else
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
+ skb_set_hash(skb, hash, hash_type);
+
+ skb_record_rx_queue(skb, queue);
+ napi_gro_receive(&ch->rx_napi, skb);
+ skb = NULL;
+
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += len;
+ count++;
+ }
+
+ if (status & rx_not_ls || skb) {
+ rx_q->state_saved = true;
+ rx_q->state.skb = skb;
+ rx_q->state.error = error;
+ rx_q->state.len = len;
+ }
+
+ stmmac_finalize_xdp_rx(priv, xdp_status);
+
+ stmmac_rx_refill(priv, queue);
+
+ priv->xstats.rx_pkt_n += count;
+ priv->xstats.rxq_stats[queue].rx_pkt_n += count;
+
+ return count;
+}
+
+static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ u32 chan = ch->index;
+ int work_done;
+
+ priv->xstats.napi_poll++;
+
+ work_done = stmmac_rx(priv, budget, chan);
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, tx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ u32 chan = ch->index;
+ int work_done;
+
+ priv->xstats.napi_poll++;
+
+ work_done = stmmac_tx_clean(priv, budget, chan);
+ work_done = min(work_done, budget);
+
+ if (work_done < budget && napi_complete_done(napi, work_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
+ return work_done;
+}
+
+static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
+{
+ struct stmmac_channel *ch =
+ container_of(napi, struct stmmac_channel, rxtx_napi);
+ struct stmmac_priv *priv = ch->priv_data;
+ int rx_done, tx_done, rxtx_done;
+ u32 chan = ch->index;
+
+ priv->xstats.napi_poll++;
+
+ tx_done = stmmac_tx_clean(priv, budget, chan);
+ tx_done = min(tx_done, budget);
+
+ rx_done = stmmac_rx_zc(priv, budget, chan);
+
+ rxtx_done = max(tx_done, rx_done);
+
+ /* If either TX or RX work is not complete, return budget
+ * and keep polling
+ */
+ if (rxtx_done >= budget)
+ return budget;
+
+ /* all work done, exit the polling mode */
+ if (napi_complete_done(napi, rxtx_done)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ /* Both the RX and TX work is complete,
+ * so enable both RX & TX IRQs.
+ */
+ stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+ }
+
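+ /* Returning the full budget would tell NAPI that work is still
+ * pending, which must not happen once napi_complete_done() has
+ * been called; cap the return value at budget - 1.
+ */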
+ return min(rxtx_done, budget - 1);
+}
+
+/**
+ * stmmac_tx_timeout - TX time-out handler
+ * @dev : Pointer to net device structure
+ * @txqueue: the index of the hanging transmit queue
+ * Description: this function is called when a packet transmission fails to
+ * complete within a reasonable time. The driver will mark the error in the
+ * netdev structure and arrange for the device to be reset to a sane state
+ * in order to transmit a new packet.
+ */
+static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ stmmac_global_err(priv);
+}
+
+/**
+ * stmmac_set_rx_mode - entry point for multicast addressing
+ * @dev : pointer to the device structure
+ * Description:
+ * This function is a driver entry point which gets called by the kernel
+ * whenever multicast addresses must be enabled/disabled.
+ * Return value:
+ * void.
+ */
+static void stmmac_set_rx_mode(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ stmmac_set_filter(priv, priv->hw, dev);
+}
+
+/**
+ * stmmac_change_mtu - entry point to change MTU size for the device.
+ * @dev : device pointer.
+ * @new_mtu : the new MTU size for the device.
+ * Description: the Maximum Transmission Unit (MTU) is used by the network layer
+ * to drive packet transmission. Ethernet has an MTU of 1500 octets
+ * (ETH_DATA_LEN). This value can be changed with ifconfig.
+ * Return value:
+ * 0 on success and a negative errno code on failure.
+ */
+static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int txfifosz = priv->plat->tx_fifo_size;
+ struct stmmac_dma_conf *dma_conf;
+ const int mtu = new_mtu;
+ int ret;
+
+ if (txfifosz == 0)
+ txfifosz = priv->dma_cap.tx_fifo_size;
+
+ txfifosz /= priv->plat->tx_queues_to_use;
+
+ if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
+ netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
+ return -EINVAL;
+ }
+
+ new_mtu = STMMAC_ALIGN(new_mtu);
+
+ /* If condition true, FIFO is too small or MTU too large */
+ if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
+ /* Try to allocate the new DMA conf with the new mtu */
+ dma_conf = stmmac_setup_dma_desc(priv, mtu);
+ if (IS_ERR(dma_conf)) {
+ netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
+ mtu);
+ return PTR_ERR(dma_conf);
+ }
+
+ stmmac_release(dev);
+
+ ret = __stmmac_open(dev, dma_conf);
+ if (ret) {
+ free_dma_desc_resources(priv, dma_conf);
+ kfree(dma_conf);
+ netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
+ return ret;
+ }
+
+ kfree(dma_conf);
+
+ stmmac_set_rx_mode(dev);
+ }
+
+ dev->mtu = mtu;
+ netdev_update_features(dev);
+
+ return 0;
+}
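+
+/* Usage sketch (illustrative, "eth0" is a placeholder): an MTU change
+ * typically reaches this handler via the standard netlink path, e.g.
+ *
+ * ip link set dev eth0 mtu 9000
+ *
+ * With the interface running, the new DMA configuration is allocated
+ * first, so a failed resize leaves the old rings untouched.
+ */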
+
+static netdev_features_t stmmac_fix_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
+ features &= ~NETIF_F_RXCSUM;
+
+ if (!priv->plat->tx_coe)
+ features &= ~NETIF_F_CSUM_MASK;
+
+ /* Some GMAC devices have a bugged Jumbo frame support that
+ * needs to have the Tx COE disabled for oversized frames
+ * (due to limited buffer sizes). In this case we disable
+ * the TX csum insertion in the TDES and not use SF.
+ */
+ if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
+ features &= ~NETIF_F_CSUM_MASK;
+
+ /* Disable tso if asked by ethtool */
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ if (features & NETIF_F_TSO)
+ priv->tso = true;
+ else
+ priv->tso = false;
+ }
+
+ return features;
+}
+
+static int stmmac_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct stmmac_priv *priv = netdev_priv(netdev);
+
+ /* Keep the COE type if RX checksum offload is supported */
+ if (features & NETIF_F_RXCSUM)
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ else
+ priv->hw->rx_csum = 0;
+ /* No check needed because rx_coe has been set before and it will be
+ * fixed up in case of any issue.
+ */
+ stmmac_rx_ipc(priv, priv->hw);
+
+ if (priv->sph_cap) {
+ bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+ u32 chan;
+
+ for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ return 0;
+}
+
+static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
+{
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+
+ if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
+ return;
+
+ /* If LP has sent verify mPacket, LP is FPE capable */
+ if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
+ if (*lp_state < FPE_STATE_CAPABLE)
+ *lp_state = FPE_STATE_CAPABLE;
+
+ /* If the user has requested FPE enable, respond quickly */
+ if (*hs_enable)
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ fpe_cfg,
+ MPACKET_RESPONSE);
+ }
+
+ /* If Local has sent verify mPacket, Local is FPE capable */
+ if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
+ if (*lo_state < FPE_STATE_CAPABLE)
+ *lo_state = FPE_STATE_CAPABLE;
+ }
+
+ /* If LP has sent response mPacket, LP is entering FPE ON */
+ if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
+ *lp_state = FPE_STATE_ENTERING_ON;
+
+ /* If Local has sent response mPacket, Local is entering FPE ON */
+ if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
+ *lo_state = FPE_STATE_ENTERING_ON;
+
+ if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
+ !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
+ priv->fpe_wq) {
+ queue_work(priv->fpe_wq, &priv->fpe_task);
+ }
+}
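+
+/* FPE verify/response handshake in brief: each station transmits a
+ * verify mPacket; receiving one proves the link partner is FPE capable
+ * and is answered with a response mPacket. Once responses have been
+ * exchanged, both sides enter ENTERING_ON and the worker queued above
+ * completes the transition to FPE_STATE_ON.
+ */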
+
+static void stmmac_common_interrupt(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queues_count;
+ u32 queue;
+ bool xmac;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+ queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
+
+ if (priv->irq_wake)
+ pm_wakeup_event(priv->device, 0);
+
+ if (priv->dma_cap.estsel)
+ stmmac_est_irq_status(priv, priv->ioaddr, priv->dev,
+ &priv->xstats, tx_cnt);
+
+ if (priv->dma_cap.fpesel) {
+ int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
+ priv->dev);
+
+ stmmac_fpe_event_status(priv, status);
+ }
+
+ /* To handle the GMAC's own interrupts */
+ if ((priv->plat->has_gmac) || xmac) {
+ int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
+
+ if (unlikely(status)) {
+ /* For LPI we need to save the tx status */
+ if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
+ priv->tx_path_in_lpi_mode = true;
+ if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
+ priv->tx_path_in_lpi_mode = false;
+ }
+
+ for (queue = 0; queue < queues_count; queue++) {
+ status = stmmac_host_mtl_irq_status(priv, priv->hw,
+ queue);
+ }
+
+ /* PCS link status */
+ if (priv->hw->pcs) {
+ if (priv->xstats.pcs_link)
+ netif_carrier_on(priv->dev);
+ else
+ netif_carrier_off(priv->dev);
+ }
+
+ stmmac_timestamp_interrupt(priv, priv);
+ }
+}
+
+/**
+ * stmmac_interrupt - main ISR
+ * @irq: interrupt number.
+ * @dev_id: to pass the net device pointer.
+ * Description: this is the main driver interrupt service routine.
+ * It can call:
+ * o DMA service routine (to manage incoming frame reception and transmission
+ * status)
+ * o Core interrupts to manage: remote wake-up, management counter, LPI
+ * interrupts.
+ */
+static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ if (stmmac_safety_feat_interrupt(priv))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ /* To handle DMA interrupts */
+ stmmac_dma_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* To handle Common interrupts */
+ stmmac_common_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(!dev)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ /* Check if a fatal error happened */
+ stmmac_safety_feat_interrupt(priv);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
+{
+ struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
+ int chan = tx_q->queue_index;
+ struct stmmac_priv *priv;
+ int status;
+
+ dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+
+ if (unlikely(!data)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
+
+ if (unlikely(status & tx_hard_error_bump_tc)) {
+ /* Try to bump up the dma threshold on this failure */
+ stmmac_bump_dma_threshold(priv, chan);
+ } else if (unlikely(status == tx_hard_error)) {
+ stmmac_tx_err(priv, chan);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
+{
+ struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
+ struct stmmac_dma_conf *dma_conf;
+ int chan = rx_q->queue_index;
+ struct stmmac_priv *priv;
+
+ dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
+ priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
+
+ if (unlikely(!data)) {
+ netdev_err(priv->dev, "%s: invalid dev pointer\n", __func__);
+ return IRQ_NONE;
+ }
+
+ /* Check if adapter is up */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return IRQ_HANDLED;
+
+ stmmac_napi_check(priv, chan, DMA_DIR_RX);
+
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* Polling receive - used by NETCONSOLE and other diagnostic tools
+ * to allow network I/O with interrupts disabled.
+ */
+static void stmmac_poll_controller(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ /* If adapter is down, do nothing */
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ if (priv->plat->multi_msi_en) {
+ for (i = 0; i < priv->plat->rx_queues_to_use; i++)
+ stmmac_msi_intr_rx(0, &priv->dma_conf.rx_queue[i]);
+
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ stmmac_msi_intr_tx(0, &priv->dma_conf.tx_queue[i]);
+ } else {
+ disable_irq(dev->irq);
+ stmmac_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+ }
+}
+#endif
+
+/**
+ * stmmac_ioctl - Entry point for the Ioctl
+ * @dev: Device pointer.
+ * @rq: An IOCTL-specific structure that can contain a pointer to
+ * a proprietary structure used to pass information to the driver.
+ * @cmd: IOCTL command
+ * Description:
+ * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
+ */
+static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
+ break;
+ case SIOCSHWTSTAMP:
+ ret = stmmac_hwtstamp_set(dev, rq);
+ break;
+ case SIOCGHWTSTAMP:
+ ret = stmmac_hwtstamp_get(dev, rq);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
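+
+/* User-space sketch (illustrative, not part of the driver): the
+ * SIOCGHWTSTAMP case above can be exercised roughly as follows, with
+ * "eth0" a placeholder name and fd an AF_INET datagram socket:
+ *
+ * struct hwtstamp_config cfg = { 0 };
+ * struct ifreq ifr = { 0 };
+ *
+ * strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ * ifr.ifr_data = (void *)&cfg;
+ * ioctl(fd, SIOCGHWTSTAMP, &ifr);
+ */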
+
+static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct stmmac_priv *priv = cb_priv;
+ int ret = -EOPNOTSUPP;
+
+ if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
+ return ret;
+
+ __stmmac_disable_all_queues(priv);
+
+ switch (type) {
+ case TC_SETUP_CLSU32:
+ ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
+ break;
+ case TC_SETUP_CLSFLOWER:
+ ret = stmmac_tc_setup_cls(priv, priv, type_data);
+ break;
+ default:
+ break;
+ }
+
+ stmmac_enable_all_queues(priv);
+ return ret;
+}
+
+static LIST_HEAD(stmmac_block_cb_list);
+
+static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &stmmac_block_cb_list,
+ stmmac_setup_tc_block_cb,
+ priv, priv, true);
+ case TC_SETUP_QDISC_CBS:
+ return stmmac_tc_setup_cbs(priv, priv, type_data);
+ case TC_SETUP_QDISC_TAPRIO:
+ return stmmac_tc_setup_taprio(priv, priv, type_data);
+ case TC_SETUP_QDISC_ETF:
+ return stmmac_tc_setup_etf(priv, priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev)
+{
+ int gso = skb_shinfo(skb)->gso_type;
+
+ if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
+ /*
+ * There is no way to determine the number of TSO/USO
+ * capable queues. Always use queue 0, because if
+ * TSO/USO is supported then at least this one will
+ * be capable.
+ */
+ return 0;
+ }
+
+ return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
+}
+
+static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret = 0;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ ret = eth_mac_addr(ndev, addr);
+ if (ret)
+ goto set_mac_error;
+
+ stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
+
+set_mac_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *stmmac_fs_dir;
+
+static void sysfs_display_ring(void *head, int size, int extend_desc,
+ struct seq_file *seq, dma_addr_t dma_phy_addr)
+{
+ int i;
+ struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
+ struct dma_desc *p = (struct dma_desc *)head;
+ dma_addr_t dma_addr;
+
+ for (i = 0; i < size; i++) {
+ if (extend_desc) {
+ dma_addr = dma_phy_addr + i * sizeof(*ep);
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(ep->basic.des0),
+ le32_to_cpu(ep->basic.des1),
+ le32_to_cpu(ep->basic.des2),
+ le32_to_cpu(ep->basic.des3));
+ ep++;
+ } else {
+ dma_addr = dma_phy_addr + i * sizeof(*p);
+ seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
+ i, &dma_addr,
+ le32_to_cpu(p->des0), le32_to_cpu(p->des1),
+ le32_to_cpu(p->des2), le32_to_cpu(p->des3));
+ p++;
+ }
+ seq_printf(seq, "\n");
+ }
+}
+
+static int stmmac_rings_status_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_count = priv->plat->rx_queues_to_use;
+ u32 tx_count = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ if ((dev->flags & IFF_UP) == 0)
+ return 0;
+
+ for (queue = 0; queue < rx_count; queue++) {
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+
+ seq_printf(seq, "RX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_erx,
+ priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
+ } else {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)rx_q->dma_rx,
+ priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
+ }
+ }
+
+ for (queue = 0; queue < tx_count; queue++) {
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ seq_printf(seq, "TX Queue %d:\n", queue);
+
+ if (priv->extend_desc) {
+ seq_printf(seq, "Extended descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_etx,
+ priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
+ } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
+ seq_printf(seq, "Descriptor ring:\n");
+ sysfs_display_ring((void *)tx_q->dma_tx,
+ priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
+ }
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
+
+static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
+{
+ struct net_device *dev = seq->private;
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (!priv->hw_cap_support) {
+ seq_printf(seq, "DMA HW features not supported\n");
+ return 0;
+ }
+
+ seq_printf(seq, "==============================\n");
+ seq_printf(seq, "\tDMA HW features\n");
+ seq_printf(seq, "==============================\n");
+
+ seq_printf(seq, "\t10/100 Mbps: %s\n",
+ (priv->dma_cap.mbps_10_100) ? "Y" : "N");
+ seq_printf(seq, "\t1000 Mbps: %s\n",
+ (priv->dma_cap.mbps_1000) ? "Y" : "N");
+ seq_printf(seq, "\tHalf duplex: %s\n",
+ (priv->dma_cap.half_duplex) ? "Y" : "N");
+ seq_printf(seq, "\tHash Filter: %s\n",
+ (priv->dma_cap.hash_filter) ? "Y" : "N");
+ seq_printf(seq, "\tMultiple MAC address registers: %s\n",
+ (priv->dma_cap.multi_addr) ? "Y" : "N");
+ seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
+ (priv->dma_cap.pcs) ? "Y" : "N");
+ seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
+ (priv->dma_cap.sma_mdio) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Remote wake up: %s\n",
+ (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
+ seq_printf(seq, "\tPMT Magic Frame: %s\n",
+ (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
+ seq_printf(seq, "\tRMON module: %s\n",
+ (priv->dma_cap.rmon) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
+ (priv->dma_cap.time_stamp) ? "Y" : "N");
+ seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
+ (priv->dma_cap.atime_stamp) ? "Y" : "N");
+ seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
+ (priv->dma_cap.eee) ? "Y" : "N");
+ seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
+ seq_printf(seq, "\tChecksum Offload in TX: %s\n",
+ (priv->dma_cap.tx_coe) ? "Y" : "N");
+ if (priv->synopsys_id >= DWMAC_CORE_4_00) {
+ seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
+ (priv->dma_cap.rx_coe) ? "Y" : "N");
+ } else {
+ seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
+ seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
+ (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
+ }
+ seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
+ (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
+ seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
+ priv->dma_cap.number_rx_channel);
+ seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
+ priv->dma_cap.number_tx_channel);
+ seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
+ priv->dma_cap.number_rx_queues);
+ seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
+ priv->dma_cap.number_tx_queues);
+ seq_printf(seq, "\tEnhanced descriptors: %s\n",
+ (priv->dma_cap.enh_desc) ? "Y" : "N");
+ seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
+ seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
+ seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz);
+ seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
+ seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
+ priv->dma_cap.pps_out_num);
+ seq_printf(seq, "\tSafety Features: %s\n",
+ priv->dma_cap.asp ? "Y" : "N");
+ seq_printf(seq, "\tFlexible RX Parser: %s\n",
+ priv->dma_cap.frpsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhanced Addressing: %d\n",
+ priv->dma_cap.host_dma_width);
+ seq_printf(seq, "\tReceive Side Scaling: %s\n",
+ priv->dma_cap.rssen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
+ priv->dma_cap.vlhash ? "Y" : "N");
+ seq_printf(seq, "\tSplit Header: %s\n",
+ priv->dma_cap.sphen ? "Y" : "N");
+ seq_printf(seq, "\tVLAN TX Insertion: %s\n",
+ priv->dma_cap.vlins ? "Y" : "N");
+ seq_printf(seq, "\tDouble VLAN: %s\n",
+ priv->dma_cap.dvlan ? "Y" : "N");
+ seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
+ priv->dma_cap.l3l4fnum);
+ seq_printf(seq, "\tARP Offloading: %s\n",
+ priv->dma_cap.arpoffsel ? "Y" : "N");
+ seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
+ priv->dma_cap.estsel ? "Y" : "N");
+ seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
+ priv->dma_cap.fpesel ? "Y" : "N");
+ seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
+ priv->dma_cap.tbssel ? "Y" : "N");
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
+
+/* Use network device events to rename debugfs file entries.
+ */
+static int stmmac_device_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ if (dev->netdev_ops != &stmmac_netdev_ops)
+ goto done;
+
+ switch (event) {
+ case NETDEV_CHANGENAME:
+ if (priv->dbgfs_dir)
+ priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
+ priv->dbgfs_dir,
+ stmmac_fs_dir,
+ dev->name);
+ break;
+ }
+done:
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block stmmac_notifier = {
+ .notifier_call = stmmac_device_event,
+};
+
+static void stmmac_init_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ rtnl_lock();
+
+ /* Create per netdev entries */
+ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
+
+ /* Entry to report DMA RX/TX rings */
+ debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
+
+ /* Entry to report the DMA HW features */
+ debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
+ &stmmac_dma_cap_fops);
+
+ rtnl_unlock();
+}
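+
+/* With debugfs mounted, the entries created above are typically found
+ * under (paths illustrative, the root directory name comes from
+ * STMMAC_RESOURCE_NAME):
+ *
+ * /sys/kernel/debug/stmmaceth/<ifname>/descriptors_status
+ * /sys/kernel/debug/stmmaceth/<ifname>/dma_cap
+ */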
+
+static void stmmac_exit_fs(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static u32 stmmac_vid_crc32_le(__le16 vid_le)
+{
+ unsigned char *data = (unsigned char *)&vid_le;
+ unsigned char data_byte = 0;
+ u32 crc = ~0x0;
+ u32 temp = 0;
+ int i, bits;
+
+ bits = get_bitmask_order(VLAN_VID_MASK);
+ for (i = 0; i < bits; i++) {
+ if ((i % 8) == 0)
+ data_byte = data[i / 8];
+
+ temp = ((crc & 1) ^ data_byte) & 1;
+ crc >>= 1;
+ data_byte >>= 1;
+
+ if (temp)
+ crc ^= 0xedb88320;
+ }
+
+ return crc;
+}
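+
+/* This is the bit-reversed CRC-32 (Ethernet polynomial 0xedb88320)
+ * computed over the low 12 VID bits only; the caller then bit-reverses
+ * the result and keeps the top nibble as the hash-table index.
+ */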
+
+static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
+{
+ u32 crc, hash = 0;
+ __le16 pmatch = 0;
+ int count = 0;
+ u16 vid = 0;
+
+ for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
+ __le16 vid_le = cpu_to_le16(vid);
+ crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
+ hash |= (1 << crc);
+ count++;
+ }
+
+ if (!priv->dma_cap.vlhash) {
+ if (count > 2) /* VID = 0 always passes filter */
+ return -EOPNOTSUPP;
+
+ pmatch = cpu_to_le16(vid);
+ hash = 0;
+ }
+
+ return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
+}
+
+static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ set_bit(vid, priv->active_vlans);
+ ret = stmmac_vlan_update(priv, is_double);
+ if (ret) {
+ clear_bit(vid, priv->active_vlans);
+ goto err_pm_put;
+ }
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ goto err_pm_put;
+ }
+err_pm_put:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ bool is_double = false;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ if (be16_to_cpu(proto) == ETH_P_8021AD)
+ is_double = true;
+
+ clear_bit(vid, priv->active_vlans);
+
+ if (priv->hw->num_vlan) {
+ ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
+ if (ret)
+ goto del_vlan_error;
+ }
+
+ ret = stmmac_vlan_update(priv, is_double);
+
+del_vlan_error:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
+ case XDP_SETUP_XSK_POOL:
+ return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
+ bpf->xsk.queue_id);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int cpu = smp_processor_id();
+ struct netdev_queue *nq;
+ int i, nxmit = 0;
+ int queue;
+
+ if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
+ return -ENETDOWN;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ queue = stmmac_xdp_get_tx_queue(priv, cpu);
+ nq = netdev_get_tx_queue(priv->dev, queue);
+
+ __netif_tx_lock(nq, cpu);
+ /* Avoids TX time-out as we are sharing with slow path */
+ txq_trans_cond_update(nq);
+
+ for (i = 0; i < num_frames; i++) {
+ int res;
+
+ res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
+ if (res == STMMAC_XDP_CONSUMED)
+ break;
+
+ nxmit++;
+ }
+
+ if (flags & XDP_XMIT_FLUSH) {
+ stmmac_flush_tx_descriptors(priv, queue);
+ stmmac_tx_timer_arm(priv, queue);
+ }
+
+ __netif_tx_unlock(nq);
+
+ return nxmit;
+}
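+
+/* Per the ndo_xdp_xmit contract, the return value is the number of
+ * frames successfully queued for transmission; the caller remains
+ * responsible for releasing any frames beyond that count.
+ */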
+
+void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_rx_dma(priv, queue);
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
+}
+
+void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ u32 buf_size;
+ int ret;
+
+ ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc RX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
+ if (ret) {
+ __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
+ netdev_err(priv->dev, "Failed to init RX desc.\n");
+ return;
+ }
+
+ stmmac_reset_rx_queue(priv, queue);
+ stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, rx_q->queue_index);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, rx_q->queue_index);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_conf.dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_start_rx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
+
+void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ stmmac_stop_tx_dma(priv, queue);
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
+}
+
+void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+ struct stmmac_channel *ch = &priv->channel[queue];
+ unsigned long flags;
+ int ret;
+
+ ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
+ if (ret) {
+ netdev_err(priv->dev, "Failed to alloc TX desc.\n");
+ return;
+ }
+
+ ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
+ if (ret) {
+ __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
+ netdev_err(priv->dev, "Failed to init TX desc.\n");
+ return;
+ }
+
+ stmmac_reset_tx_queue(priv, queue);
+ stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, tx_q->queue_index);
+
+ if (tx_q->tbs & STMMAC_TBS_AVAIL)
+ stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, tx_q->queue_index);
+
+ stmmac_start_tx_dma(priv, queue);
+
+ spin_lock_irqsave(&ch->lock, flags);
+ stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
+ spin_unlock_irqrestore(&ch->lock, flags);
+}
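+
+/* The four per-queue enable/disable helpers above let the XDP/XSK
+ * setup path quiesce and restart a single DMA channel (IRQ masked,
+ * DMA stopped, descriptors freed and re-allocated) without a full
+ * interface restart.
+ */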
+
+void stmmac_xdp_release(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 chan;
+
+ /* Ensure tx function is not running */
+ netif_tx_disable(dev);
+
+ /* Disable NAPI process */
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ /* Free the IRQ lines */
+ stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
+
+ /* Stop TX/RX DMA channels */
+ stmmac_stop_all_dma(priv);
+
+ /* Release and free the Rx/Tx resources */
+ free_dma_desc_resources(priv, &priv->dma_conf);
+
+ /* Disable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, false);
+
+ /* set trans_start so we don't get spurious
+ * watchdogs during reset
+ */
+ netif_trans_update(dev);
+ netif_carrier_off(dev);
+}
+
+int stmmac_xdp_open(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 dma_csr_ch = max(rx_cnt, tx_cnt);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ u32 buf_size;
+ bool sph_en;
+ u32 chan;
+ int ret;
+
+ ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors allocation failed\n",
+ __func__);
+ goto dma_desc_error;
+ }
+
+ ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
+ if (ret < 0) {
+ netdev_err(dev, "%s: DMA descriptors initialization failed\n",
+ __func__);
+ goto init_error;
+ }
+
+ stmmac_reset_queues_param(priv);
+
+ /* DMA CSR Channel configuration */
+ for (chan = 0; chan < dma_csr_ch; chan++) {
+ stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
+ stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
+ }
+
+ /* Adjust Split header */
+ sph_en = (priv->hw->rx_csum > 0) && priv->sph;
+
+ /* DMA RX Channel Configuration */
+ for (chan = 0; chan < rx_cnt; chan++) {
+ rx_q = &priv->dma_conf.rx_queue[chan];
+
+ stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ rx_q->dma_rx_phy, chan);
+
+ rx_q->rx_tail_addr = rx_q->dma_rx_phy +
+ (rx_q->buf_alloc_num *
+ sizeof(struct dma_desc));
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
+ rx_q->rx_tail_addr, chan);
+
+ if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
+ buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ buf_size,
+ rx_q->queue_index);
+ } else {
+ stmmac_set_dma_bfsize(priv, priv->ioaddr,
+ priv->dma_conf.dma_buf_sz,
+ rx_q->queue_index);
+ }
+
+ stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
+ }
+
+ /* DMA TX Channel Configuration */
+ for (chan = 0; chan < tx_cnt; chan++) {
+ tx_q = &priv->dma_conf.tx_queue[chan];
+
+ stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
+ tx_q->dma_tx_phy, chan);
+
+ tx_q->tx_tail_addr = tx_q->dma_tx_phy;
+ stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
+ tx_q->tx_tail_addr, chan);
+
+ hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ tx_q->txtimer.function = stmmac_tx_timer;
+ }
+
+ /* Enable the MAC Rx/Tx */
+ stmmac_mac_set(priv, priv->ioaddr, true);
+
+ /* Start Rx & Tx DMA Channels */
+ stmmac_start_all_dma(priv);
+
+ ret = stmmac_request_irq(dev);
+ if (ret)
+ goto irq_error;
+
+ /* Enable NAPI process */
+ stmmac_enable_all_queues(priv);
+ netif_carrier_on(dev);
+ netif_tx_start_all_queues(dev);
+ stmmac_enable_all_dma_irq(priv);
+
+ return 0;
+
+irq_error:
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ stmmac_hw_teardown(dev);
+init_error:
+ free_dma_desc_resources(priv, &priv->dma_conf);
+dma_desc_error:
+ return ret;
+}
+
+int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ struct stmmac_rx_queue *rx_q;
+ struct stmmac_tx_queue *tx_q;
+ struct stmmac_channel *ch;
+
+ if (test_bit(STMMAC_DOWN, &priv->state) ||
+ !netif_carrier_ok(priv->dev))
+ return -ENETDOWN;
+
+ if (!stmmac_xdp_is_enabled(priv))
+ return -EINVAL;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ rx_q = &priv->dma_conf.rx_queue[queue];
+ tx_q = &priv->dma_conf.tx_queue[queue];
+ ch = &priv->channel[queue];
+
+ if (!rx_q->xsk_pool && !tx_q->xsk_pool)
+ return -EINVAL;
+
+ if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
+ /* EQoS does not have a per-DMA channel SW interrupt,
+ * so we schedule the RX/TX NAPI straight away.
+ */
+ if (likely(napi_schedule_prep(&ch->rxtx_napi)))
+ __napi_schedule(&ch->rxtx_napi);
+ }
+
+ return 0;
+}
+
+static const struct net_device_ops stmmac_netdev_ops = {
+ .ndo_open = stmmac_open,
+ .ndo_start_xmit = stmmac_xmit,
+ .ndo_stop = stmmac_release,
+ .ndo_change_mtu = stmmac_change_mtu,
+ .ndo_fix_features = stmmac_fix_features,
+ .ndo_set_features = stmmac_set_features,
+ .ndo_set_rx_mode = stmmac_set_rx_mode,
+ .ndo_tx_timeout = stmmac_tx_timeout,
+ .ndo_eth_ioctl = stmmac_ioctl,
+ .ndo_setup_tc = stmmac_setup_tc,
+ .ndo_select_queue = stmmac_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = stmmac_poll_controller,
+#endif
+ .ndo_set_mac_address = stmmac_set_mac_address,
+ .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
+ .ndo_bpf = stmmac_bpf,
+ .ndo_xdp_xmit = stmmac_xdp_xmit,
+ .ndo_xsk_wakeup = stmmac_xsk_wakeup,
+};
+
+static void stmmac_reset_subtask(struct stmmac_priv *priv)
+{
+ if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
+ return;
+ if (test_bit(STMMAC_DOWN, &priv->state))
+ return;
+
+ netdev_err(priv->dev, "Reset adapter.\n");
+
+ rtnl_lock();
+ netif_trans_update(priv->dev);
+ while (test_and_set_bit(STMMAC_RESETING, &priv->state))
+ usleep_range(1000, 2000);
+
+ set_bit(STMMAC_DOWN, &priv->state);
+ dev_close(priv->dev);
+ dev_open(priv->dev, NULL);
+ clear_bit(STMMAC_DOWN, &priv->state);
+ clear_bit(STMMAC_RESETING, &priv->state);
+ rtnl_unlock();
+}
+
+static void stmmac_service_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ service_task);
+
+ stmmac_reset_subtask(priv);
+ clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
+}
+
+/**
+ * stmmac_hw_init - Init the MAC device
+ * @priv: driver private structure
+ * Description: this function is to configure the MAC device according to
+ * some platform parameters or the HW capability register. It prepares the
+ * driver to use either ring or chain modes and to setup either enhanced or
+ * normal descriptors.
+ */
+static int stmmac_hw_init(struct stmmac_priv *priv)
+{
+ int ret;
+
+ /* dwmac-sun8i only works in chain mode */
+ if (priv->plat->has_sun8i)
+ chain_mode = 1;
+ priv->chain_mode = chain_mode;
+
+ /* Initialize HW Interface */
+ ret = stmmac_hwif_init(priv);
+ if (ret)
+ return ret;
+
+ /* Get the HW capability (GMAC cores newer than 3.50a) */
+ priv->hw_cap_support = stmmac_get_hw_features(priv);
+ if (priv->hw_cap_support) {
+ dev_info(priv->device, "DMA HW capability register supported\n");
+
+ /* We can override some gmac/dma configuration fields
+ * (e.g. enh_desc, tx_coe) that are passed through the
+ * platform with the values from the HW capability
+ * register (if supported).
+ */
+ priv->plat->enh_desc = priv->dma_cap.enh_desc;
+ priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
+ !priv->plat->use_phy_wol;
+ priv->hw->pmt = priv->plat->pmt;
+ if (priv->dma_cap.hash_tb_sz) {
+ priv->hw->multicast_filter_bins =
+ (BIT(priv->dma_cap.hash_tb_sz) << 5);
+ priv->hw->mcast_bits_log2 =
+ ilog2(priv->hw->multicast_filter_bins);
+ }
+
+ /* TXCOE doesn't work in thresh DMA mode */
+ if (priv->plat->force_thresh_dma_mode)
+ priv->plat->tx_coe = 0;
+ else
+ priv->plat->tx_coe = priv->dma_cap.tx_coe;
+
+ /* In case of GMAC4 rx_coe is from HW cap register. */
+ priv->plat->rx_coe = priv->dma_cap.rx_coe;
+
+ if (priv->dma_cap.rx_coe_type2)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
+ else if (priv->dma_cap.rx_coe_type1)
+ priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
+
+ } else {
+ dev_info(priv->device, "No HW DMA feature register supported\n");
+ }
+
+ if (priv->plat->rx_coe) {
+ priv->hw->rx_csum = priv->plat->rx_coe;
+ dev_info(priv->device, "RX Checksum Offload Engine supported\n");
+ if (priv->synopsys_id < DWMAC_CORE_4_00)
+ dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
+ }
+ if (priv->plat->tx_coe)
+ dev_info(priv->device, "TX Checksum insertion supported\n");
+
+ if (priv->plat->pmt) {
+ dev_info(priv->device, "Wake-Up On Lan supported\n");
+ device_set_wakeup_capable(priv->device, 1);
+ }
+
+ if (priv->dma_cap.tsoen)
+ dev_info(priv->device, "TSO supported\n");
+
+ priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en;
+ priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
+
+ /* Run HW quirks, if any */
+ if (priv->hwif_quirks) {
+ ret = priv->hwif_quirks(priv);
+ if (ret)
+ return ret;
+ }
+
+ /* Rx Watchdog is available in cores newer than 3.40.
+ * In some cases, for example on buggy HW, this feature
+ * has to be disabled; this can be done by passing the
+ * riwt_off field from the platform.
+ */
+ if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
+ (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
+ priv->use_riwt = 1;
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
+ }
+
+ return 0;
+}
+
+static void stmmac_napi_add(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ ch->priv_data = priv;
+ ch->index = queue;
+ spin_lock_init(&ch->lock);
+
+ if (queue < priv->plat->rx_queues_to_use) {
+ netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
+ }
+ if (queue < priv->plat->tx_queues_to_use) {
+ netif_napi_add_tx(dev, &ch->tx_napi,
+ stmmac_napi_poll_tx);
+ }
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_add(dev, &ch->rxtx_napi,
+ stmmac_napi_poll_rxtx);
+ }
+ }
+}
+
+static void stmmac_napi_del(struct net_device *dev)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ u32 queue, maxq;
+
+ maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
+
+ for (queue = 0; queue < maxq; queue++) {
+ struct stmmac_channel *ch = &priv->channel[queue];
+
+ if (queue < priv->plat->rx_queues_to_use)
+ netif_napi_del(&ch->rx_napi);
+ if (queue < priv->plat->tx_queues_to_use)
+ netif_napi_del(&ch->tx_napi);
+ if (queue < priv->plat->rx_queues_to_use &&
+ queue < priv->plat->tx_queues_to_use) {
+ netif_napi_del(&ch->rxtx_napi);
+ }
+ }
+}
+
+int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0, i;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ stmmac_napi_del(dev);
+
+ priv->plat->rx_queues_to_use = rx_cnt;
+ priv->plat->tx_queues_to_use = tx_cnt;
+ if (!netif_is_rxfh_configured(dev))
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i,
+ rx_cnt);
+
+ stmmac_napi_add(dev);
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
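+
+/* Typically reached via ethtool's set_channels callback, e.g.
+ * (illustrative):
+ *
+ * ethtool -L eth0 rx 4 tx 4
+ */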
+
+int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int ret = 0;
+
+ if (netif_running(dev))
+ stmmac_release(dev);
+
+ priv->dma_conf.dma_rx_size = rx_size;
+ priv->dma_conf.dma_tx_size = tx_size;
+
+ if (netif_running(dev))
+ ret = stmmac_open(dev);
+
+ return ret;
+}
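+
+/* Typically reached via ethtool's set_ringparam callback, e.g.
+ * (illustrative):
+ *
+ * ethtool -G eth0 rx 1024 tx 1024
+ */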
+
+#define SEND_VERIFY_MPACKET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
+static void stmmac_fpe_lp_task(struct work_struct *work)
+{
+ struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
+ fpe_task);
+ struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
+ enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
+ enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
+ bool *hs_enable = &fpe_cfg->hs_enable;
+ bool *enable = &fpe_cfg->enable;
+ int retries = 20;
+
+ while (retries-- > 0) {
+ /* Bail out immediately if FPE handshake is OFF */
+ if (*lo_state == FPE_STATE_OFF || !*hs_enable)
+ break;
+
+ if (*lo_state == FPE_STATE_ENTERING_ON &&
+ *lp_state == FPE_STATE_ENTERING_ON) {
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ *enable);
+
+ netdev_info(priv->dev, "configured FPE\n");
+
+ *lo_state = FPE_STATE_ON;
+ *lp_state = FPE_STATE_ON;
+ netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
+ break;
+ }
+
+ if ((*lo_state == FPE_STATE_CAPABLE ||
+ *lo_state == FPE_STATE_ENTERING_ON) &&
+ *lp_state != FPE_STATE_ON) {
+ netdev_info(priv->dev, SEND_VERIFY_MPACKET_FMT,
+ *lo_state, *lp_state);
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ fpe_cfg,
+ MPACKET_VERIFY);
+ }
+ /* Sleep then retry */
+ msleep(500);
+ }
+
+ clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
+}
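+
+/* The retry loop above gives the handshake roughly 20 * 500 ms = 10 s
+ * to converge before the task gives up and clears its scheduled bit.
+ */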
+
+void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
+{
+ if (priv->plat->fpe_cfg->hs_enable != enable) {
+ if (enable) {
+ stmmac_fpe_send_mpacket(priv, priv->ioaddr,
+ priv->plat->fpe_cfg,
+ MPACKET_VERIFY);
+ } else {
+ priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
+ priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
+ }
+
+ priv->plat->fpe_cfg->hs_enable = enable;
+ }
+}
+
+/**
+ * stmmac_dvr_probe
+ * @device: device pointer
+ * @plat_dat: platform data pointer
+ * @res: stmmac resource pointer
+ * Description: this is the main probe function used to
+ * call alloc_etherdev and allocate the priv structure.
+ * Return:
+ * 0 on success, a negative errno otherwise.
+ */
+int stmmac_dvr_probe(struct device *device,
+ struct plat_stmmacenet_data *plat_dat,
+ struct stmmac_resources *res)
+{
+ struct net_device *ndev = NULL;
+ struct stmmac_priv *priv;
+ u32 rxq;
+ int i, ret = 0;
+
+ ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
+ MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
+ if (!ndev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(ndev, device);
+
+ priv = netdev_priv(ndev);
+ priv->device = device;
+ priv->dev = ndev;
+
+ stmmac_set_ethtool_ops(ndev);
+ priv->pause = pause;
+ priv->plat = plat_dat;
+ priv->ioaddr = res->addr;
+ priv->dev->base_addr = (unsigned long)res->addr;
+ priv->plat->dma_cfg->multi_msi_en = priv->plat->multi_msi_en;
+
+ priv->dev->irq = res->irq;
+ priv->wol_irq = res->wol_irq;
+ priv->lpi_irq = res->lpi_irq;
+ priv->sfty_ce_irq = res->sfty_ce_irq;
+ priv->sfty_ue_irq = res->sfty_ue_irq;
+ for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
+ priv->rx_irq[i] = res->rx_irq[i];
+ for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
+ priv->tx_irq[i] = res->tx_irq[i];
+
+ if (!is_zero_ether_addr(res->mac))
+ eth_hw_addr_set(priv->dev, res->mac);
+
+ dev_set_drvdata(device, priv->dev);
+
+ /* Verify driver arguments */
+ stmmac_verify_args();
+
+ priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
+ if (!priv->af_xdp_zc_qps)
+ return -ENOMEM;
+
+ /* Allocate workqueue */
+ priv->wq = create_singlethread_workqueue("stmmac_wq");
+ if (!priv->wq) {
+ dev_err(priv->device, "failed to create workqueue\n");
+ ret = -ENOMEM;
+ goto error_wq_init;
+ }
+
+ INIT_WORK(&priv->service_task, stmmac_service_task);
+
+ /* Initialize Link Partner FPE workqueue */
+ INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
+
+ /* Override with kernel parameters if supplied XXX CRS XXX
+ * this needs to have multiple instances
+ */
+ if ((phyaddr >= 0) && (phyaddr <= 31))
+ priv->plat->phy_addr = phyaddr;
+
+ if (priv->plat->stmmac_rst) {
+ ret = reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_deassert(priv->plat->stmmac_rst);
+ /* Some reset controllers have only reset callback instead of
+ * assert + deassert callbacks pair.
+ */
+ if (ret == -ENOTSUPP)
+ reset_control_reset(priv->plat->stmmac_rst);
+ }
+
+ ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
+ if (ret == -ENOTSUPP)
+ dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
+ ERR_PTR(ret));
+
+ /* Wait a bit for the reset to take effect */
+ udelay(10);
+
+ /* Init MAC and get the capabilities */
+ ret = stmmac_hw_init(priv);
+ if (ret)
+ goto error_hw_init;
+
+ /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
+ */
+ if (priv->synopsys_id < DWMAC_CORE_5_20)
+ priv->plat->dma_cfg->dche = false;
+
+ stmmac_check_ether_addr(priv);
+
+ ndev->netdev_ops = &stmmac_netdev_ops;
+
+ ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+
+ ret = stmmac_tc_init(priv, priv);
+ if (!ret)
+ ndev->hw_features |= NETIF_F_HW_TC;
+
+ if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) {
+ ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
+ if (priv->plat->has_gmac4)
+ ndev->hw_features |= NETIF_F_GSO_UDP_L4;
+ priv->tso = true;
+ dev_info(priv->device, "TSO feature enabled\n");
+ }
+
+ if (priv->dma_cap.sphen && !priv->plat->sph_disable) {
+ ndev->hw_features |= NETIF_F_GRO;
+ priv->sph_cap = true;
+ priv->sph = priv->sph_cap;
+ dev_info(priv->device, "SPH feature enabled\n");
+ }
+
+ /* Ideally our host DMA address width is the same as for the
+ * device. However, it may differ and then we have to use our
+ * host DMA width for allocation and the device DMA width for
+ * register handling.
+ */
+ if (priv->plat->host_dma_width)
+ priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
+ else
+ priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
+
+ if (priv->dma_cap.host_dma_width) {
+ ret = dma_set_mask_and_coherent(device,
+ DMA_BIT_MASK(priv->dma_cap.host_dma_width));
+ if (!ret) {
+ dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
+ priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
+
+ /*
+ * If more than 32 bits can be addressed, make sure to
+ * enable enhanced addressing mode.
+ */
+ if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
+ priv->plat->dma_cfg->eame = true;
+ } else {
+ ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
+ if (ret) {
+ dev_err(priv->device, "Failed to set DMA Mask\n");
+ goto error_hw_init;
+ }
+
+ priv->dma_cap.host_dma_width = 32;
+ }
+ }
+
+ ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
+ ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
+#ifdef STMMAC_VLAN_TAG_USED
+ /* Both mac100 and gmac support receive VLAN tag detection */
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
+ if (priv->dma_cap.vlhash) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
+ }
+ if (priv->dma_cap.vlins) {
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
+ if (priv->dma_cap.dvlan)
+ ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
+ }
+#endif
+ priv->msg_enable = netif_msg_init(debug, default_msg_level);
+
+ /* Initialize RSS */
+ rxq = priv->plat->rx_queues_to_use;
+ netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
+ for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
+ priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
+
+ if (priv->dma_cap.rssen && priv->plat->rss_en)
+ ndev->features |= NETIF_F_RXHASH;
+
+ /* MTU range: 46 - hw-specific max */
+ ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
+ if (priv->plat->has_xgmac)
+ ndev->max_mtu = XGMAC_JUMBO_LEN;
+ else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
+ ndev->max_mtu = JUMBO_LEN;
+ else
+ ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
+ /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu
+ * or if plat->maxmtu < ndev->min_mtu, which is an invalid range.
+ */
+ if ((priv->plat->maxmtu < ndev->max_mtu) &&
+ (priv->plat->maxmtu >= ndev->min_mtu))
+ ndev->max_mtu = priv->plat->maxmtu;
+ else if (priv->plat->maxmtu < ndev->min_mtu)
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
+
+ if (flow_ctrl)
+ priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
+
+ /* Setup channels NAPI */
+ stmmac_napi_add(ndev);
+
+ mutex_init(&priv->lock);
+
+ /* If a specific clk_csr value is passed from the platform,
+ * the CSR Clock Range selection cannot be changed at run-time
+ * and it is fixed. Otherwise the driver will try to set the
+ * MDC clock dynamically according to the actual csr clock
+ * input.
+ */
+ if (priv->plat->clk_csr >= 0)
+ priv->clk_csr = priv->plat->clk_csr;
+ else
+ stmmac_clk_csr_set(priv);
+
+ stmmac_check_pcs_mode(priv);
+
+ pm_runtime_get_noresume(device);
+ pm_runtime_set_active(device);
+ if (!pm_runtime_enabled(device))
+ pm_runtime_enable(device);
+
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI) {
+ /* MDIO bus Registration */
+ ret = stmmac_mdio_register(ndev);
+ if (ret < 0) {
+ dev_err_probe(priv->device, ret,
+ "%s: MDIO bus (id: %d) registration failed\n",
+ __func__, priv->plat->bus_id);
+ goto error_mdio_register;
+ }
+ }
+
+ if (priv->plat->speed_mode_2500)
+ priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
+
+ if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
+ ret = stmmac_xpcs_setup(priv->mii);
+ if (ret)
+ goto error_xpcs_setup;
+ }
+
+ ret = stmmac_phy_setup(priv);
+ if (ret) {
+ netdev_err(ndev, "failed to setup phy (%d)\n", ret);
+ goto error_phy_setup;
+ }
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
+ goto error_netdev_register;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ stmmac_init_fs(ndev);
+#endif
+
+ if (priv->plat->dump_debug_regs)
+ priv->plat->dump_debug_regs(priv->plat->bsp_priv);
+
+ /* Let pm_runtime_put() disable the clocks.
+ * If CONFIG_PM is not enabled, the clocks will stay powered.
+ */
+ pm_runtime_put(device);
+
+ return ret;
+
+error_netdev_register:
+ phylink_destroy(priv->phylink);
+error_xpcs_setup:
+error_phy_setup:
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+error_mdio_register:
+ stmmac_napi_del(ndev);
+error_hw_init:
+ destroy_workqueue(priv->wq);
+error_wq_init:
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
+
+/**
+ * stmmac_dvr_remove
+ * @dev: device pointer
+ * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
+ * changes the link status and releases the DMA descriptor rings.
+ */
+int stmmac_dvr_remove(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ netdev_info(priv->dev, "%s: removing driver", __func__);
+
+ pm_runtime_get_sync(dev);
+
+ stmmac_stop_all_dma(priv);
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ netif_carrier_off(ndev);
+ unregister_netdev(ndev);
+
+#ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+#endif
+ phylink_destroy(priv->phylink);
+ if (priv->plat->stmmac_rst)
+ reset_control_assert(priv->plat->stmmac_rst);
+ reset_control_assert(priv->plat->stmmac_ahb_rst);
+ if (priv->hw->pcs != STMMAC_PCS_TBI &&
+ priv->hw->pcs != STMMAC_PCS_RTBI)
+ stmmac_mdio_unregister(ndev);
+ destroy_workqueue(priv->wq);
+ mutex_destroy(&priv->lock);
+ bitmap_free(priv->af_xdp_zc_qps);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
+
+/**
+ * stmmac_suspend - suspend callback
+ * @dev: device pointer
+ * Description: this is the function to suspend the device and it is called
+ * by the platform driver to stop the network queue, release the resources,
+ * program the PMT register (for WoL), clean and release driver resources.
+ */
+int stmmac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ u32 chan;
+
+ if (!ndev || !netif_running(ndev))
+ return 0;
+
+ mutex_lock(&priv->lock);
+
+ netif_device_detach(ndev);
+
+ stmmac_disable_all_queues(priv);
+
+ for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
+ hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
+
+ if (priv->eee_enabled) {
+ priv->tx_path_in_lpi_mode = false;
+ del_timer_sync(&priv->eee_ctrl_timer);
+ }
+
+ /* Stop TX/RX DMA */
+ stmmac_stop_all_dma(priv);
+
+ if (priv->plat->serdes_powerdown)
+ priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
+
+ /* Enable Power down mode by programming the PMT regs */
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ stmmac_pmt(priv, priv->hw, priv->wolopts);
+ priv->irq_wake = 1;
+ } else {
+ stmmac_mac_set(priv, priv->ioaddr, false);
+ pinctrl_pm_select_sleep_state(priv->device);
+ }
+
+ mutex_unlock(&priv->lock);
+
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_suspend(priv->phylink, true);
+ } else {
+ if (device_may_wakeup(priv->device))
+ phylink_speed_down(priv->phylink, false);
+ phylink_suspend(priv->phylink, false);
+ }
+ rtnl_unlock();
+
+ if (priv->dma_cap.fpesel) {
+ /* Disable FPE */
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use, false);
+
+ stmmac_fpe_handshake(priv, false);
+ stmmac_fpe_stop_wq(priv);
+ }
+
+ priv->speed = SPEED_UNKNOWN;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_suspend);
+
+static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
+
+ rx_q->cur_rx = 0;
+ rx_q->dirty_rx = 0;
+}
+
+static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
+{
+ struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
+
+ tx_q->cur_tx = 0;
+ tx_q->dirty_tx = 0;
+ tx_q->mss = 0;
+
+ netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
+}
+
+/**
+ * stmmac_reset_queues_param - reset queue parameters
+ * @priv: device pointer
+ */
+static void stmmac_reset_queues_param(struct stmmac_priv *priv)
+{
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
+ u32 queue;
+
+ for (queue = 0; queue < rx_cnt; queue++)
+ stmmac_reset_rx_queue(priv, queue);
+
+ for (queue = 0; queue < tx_cnt; queue++)
+ stmmac_reset_tx_queue(priv, queue);
+}
+
+/**
+ * stmmac_resume - resume callback
+ * @dev: device pointer
+ * Description: when resume this function is invoked to setup the DMA and CORE
+ * in a usable state.
+ */
+int stmmac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ /* The Power Down bit in the PM register is cleared
+ * automatically as soon as a magic packet or a Wake-up frame
+ * is received. Anyway, it's better to clear this bit manually
+ * because it can generate problems while resuming
+ * from other devices (e.g. serial console).
+ */
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ mutex_lock(&priv->lock);
+ stmmac_pmt(priv, priv->hw, 0);
+ mutex_unlock(&priv->lock);
+ priv->irq_wake = 0;
+ } else {
+ pinctrl_pm_select_default_state(priv->device);
+ /* reset the phy so that it's ready */
+ if (priv->mii)
+ stmmac_mdio_reset(priv->mii);
+ }
+
+ if (priv->plat->serdes_powerup) {
+ ret = priv->plat->serdes_powerup(ndev,
+ priv->plat->bsp_priv);
+
+ if (ret < 0)
+ return ret;
+ }
+
+ rtnl_lock();
+ if (device_may_wakeup(priv->device) && priv->plat->pmt) {
+ phylink_resume(priv->phylink);
+ } else {
+ phylink_resume(priv->phylink);
+ if (device_may_wakeup(priv->device))
+ phylink_speed_up(priv->phylink);
+ }
+ rtnl_unlock();
+
+ rtnl_lock();
+ mutex_lock(&priv->lock);
+
+ stmmac_reset_queues_param(priv);
+
+ stmmac_free_tx_skbufs(priv);
+ stmmac_clear_descriptors(priv, &priv->dma_conf);
+
+ stmmac_hw_setup(ndev, false);
+ stmmac_init_coalesce(priv);
+ stmmac_set_rx_mode(ndev);
+
+ stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
+
+ stmmac_enable_all_queues(priv);
+ stmmac_enable_all_dma_irq(priv);
+
+ mutex_unlock(&priv->lock);
+ rtnl_unlock();
+
+ netif_device_attach(ndev);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(stmmac_resume);
+
+#ifndef MODULE
+static int __init stmmac_cmdline_opt(char *str)
+{
+ char *opt;
+
+ if (!str || !*str)
+ return 1;
+ while ((opt = strsep(&str, ",")) != NULL) {
+ if (!strncmp(opt, "debug:", 6)) {
+ if (kstrtoint(opt + 6, 0, &debug))
+ goto err;
+ } else if (!strncmp(opt, "phyaddr:", 8)) {
+ if (kstrtoint(opt + 8, 0, &phyaddr))
+ goto err;
+ } else if (!strncmp(opt, "buf_sz:", 7)) {
+ if (kstrtoint(opt + 7, 0, &buf_sz))
+ goto err;
+ } else if (!strncmp(opt, "tc:", 3)) {
+ if (kstrtoint(opt + 3, 0, &tc))
+ goto err;
+ } else if (!strncmp(opt, "watchdog:", 9)) {
+ if (kstrtoint(opt + 9, 0, &watchdog))
+ goto err;
+ } else if (!strncmp(opt, "flow_ctrl:", 10)) {
+ if (kstrtoint(opt + 10, 0, &flow_ctrl))
+ goto err;
+ } else if (!strncmp(opt, "pause:", 6)) {
+ if (kstrtoint(opt + 6, 0, &pause))
+ goto err;
+ } else if (!strncmp(opt, "eee_timer:", 10)) {
+ if (kstrtoint(opt + 10, 0, &eee_timer))
+ goto err;
+ } else if (!strncmp(opt, "chain_mode:", 11)) {
+ if (kstrtoint(opt + 11, 0, &chain_mode))
+ goto err;
+ }
+ }
+ return 1;
+
+err:
+ pr_err("%s: ERROR broken module parameter conversion", __func__);
+ return 1;
+}
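+
+/* Example (illustrative): with the driver built in, options are parsed
+ * from the kernel command line, e.g.
+ *
+ * stmmaceth=debug:16,phyaddr:1,watchdog:5000
+ */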
+
+__setup("stmmaceth=", stmmac_cmdline_opt);
+#endif /* MODULE */
+
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ /* Create debugfs main directory if it doesn't exist yet */
+ if (!stmmac_fs_dir)
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+ register_netdevice_notifier(&stmmac_notifier);
+#endif
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ unregister_netdevice_notifier(&stmmac_notifier);
+ debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
new file mode 100644
index 000000000..379fc887d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -0,0 +1,584 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ STMMAC Ethernet Driver -- MDIO bus implementation
+ Provides Bus interface for MII registers
+
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Carl Shaw <carl.shaw@st.com>
+ Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/gpio/consumer.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/mii.h>
+#include <linux/of_mdio.h>
+#include <linux/pm_runtime.h>
+#include <linux/phy.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+
+#include "dwxgmac2.h"
+#include "stmmac.h"
+
+#define MII_BUSY 0x00000001
+#define MII_WRITE 0x00000002
+#define MII_DATA_MASK GENMASK(15, 0)
+
+/* GMAC4 defines */
+#define MII_GMAC4_GOC_SHIFT 2
+#define MII_GMAC4_REG_ADDR_SHIFT 16
+#define MII_GMAC4_WRITE (1 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_READ (3 << MII_GMAC4_GOC_SHIFT)
+#define MII_GMAC4_C45E BIT(1)
+
+/* XGMAC defines */
+#define MII_XGMAC_SADDR BIT(18)
+#define MII_XGMAC_CMD_SHIFT 16
+#define MII_XGMAC_WRITE (1 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_READ (3 << MII_XGMAC_CMD_SHIFT)
+#define MII_XGMAC_BUSY BIT(22)
+#define MII_XGMAC_MAX_C22ADDR 3
+#define MII_XGMAC_C22P_MASK GENMASK(MII_XGMAC_MAX_C22ADDR, 0)
+#define MII_XGMAC_PA_SHIFT 16
+#define MII_XGMAC_DA_SHIFT 21
+
+static int stmmac_xgmac2_c45_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ u32 tmp;
+
+ /* Set port as Clause 45 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0xffff);
+ *hw_addr |= (phyreg >> MII_DEVADDR_C45_SHIFT) << MII_XGMAC_DA_SHIFT;
+ return 0;
+}
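+
+/* A worked example of the packing above (hypothetical values): with the
+ * legacy C45 encoding, where phyreg carries the device address in bits
+ * 20:16, phyaddr = 1, devad = 7 and register 0x20 give
+ *
+ *   hw_addr = (1 << MII_XGMAC_PA_SHIFT) | 0x0020 | (7 << MII_XGMAC_DA_SHIFT)
+ *           = 0x00e10020
+ */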
+
+static int stmmac_xgmac2_c22_format(struct stmmac_priv *priv, int phyaddr,
+ int phyreg, u32 *hw_addr)
+{
+ u32 tmp;
+
+ /* HW does not support C22 addr >= 4 */
+ if (phyaddr > MII_XGMAC_MAX_C22ADDR)
+ return -ENODEV;
+
+ /* Set port as Clause 22 */
+ tmp = readl(priv->ioaddr + XGMAC_MDIO_C22P);
+ tmp &= ~MII_XGMAC_C22P_MASK;
+ tmp |= BIT(phyaddr);
+ writel(tmp, priv->ioaddr + XGMAC_MDIO_C22P);
+
+ *hw_addr = (phyaddr << MII_XGMAC_PA_SHIFT) | (phyreg & 0x1f);
+ return 0;
+}
+
+static int stmmac_xgmac2_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 tmp, addr, value = MII_XGMAC_BUSY;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ if (phyreg & MII_ADDR_C45) {
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ goto err_disable_clks;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ goto err_disable_clks;
+
+ value |= MII_XGMAC_SADDR;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= MII_XGMAC_READ;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ /* Set the MII address register to read */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ /* Read the data from the MII data register */
+ ret = (int)readl(priv->ioaddr + mii_data) & GENMASK(15, 0);
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+static int stmmac_xgmac2_mdio_write(struct mii_bus *bus, int phyaddr,
+ int phyreg, u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 addr, tmp, value = MII_XGMAC_BUSY;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ if (phyreg & MII_ADDR_C45) {
+ phyreg &= ~MII_ADDR_C45;
+
+ ret = stmmac_xgmac2_c45_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ goto err_disable_clks;
+ } else {
+ ret = stmmac_xgmac2_c22_format(priv, phyaddr, phyreg, &addr);
+ if (ret)
+ goto err_disable_clks;
+
+ value |= MII_XGMAC_SADDR;
+ }
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ value |= phydata;
+ value |= MII_XGMAC_WRITE;
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ /* Set the MII address register to write */
+ writel(addr, priv->ioaddr + mii_address);
+ writel(value, priv->ioaddr + mii_data);
+
+ /* Wait until any existing MII operation is complete */
+ ret = readl_poll_timeout(priv->ioaddr + mii_data, tmp,
+ !(tmp & MII_XGMAC_BUSY), 100, 10000);
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+/**
+ * stmmac_mdio_read
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr
+ * @phyreg: MII reg
+ * Description: it reads data from the MII register from within the phy device.
+ * For the 7111 GMAC, we must set the bit 0 in the MII address register while
+ * accessing the PHY registers.
+ * Fortunately, it seems this has no drawback for the 7109 MAC.
+ */
+static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ u32 value = MII_BUSY;
+ int data = 0;
+ u32 v;
+
+ data = pm_runtime_resume_and_get(priv->device);
+ if (data < 0)
+ return data;
+
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ if (priv->plat->has_gmac4) {
+ value |= MII_GMAC4_READ;
+ if (phyreg & MII_ADDR_C45) {
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
+ priv->hw->mii.reg_shift) &
+ priv->hw->mii.reg_mask;
+
+ data |= (phyreg & MII_REGADDR_C45_MASK) <<
+ MII_GMAC4_REG_ADDR_SHIFT;
+ }
+ }
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000)) {
+ data = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ writel(data, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000)) {
+ data = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ /* Read the data from the MII data register */
+ data = (int)readl(priv->ioaddr + mii_data) & MII_DATA_MASK;
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return data;
+}
+
+/**
+ * stmmac_mdio_write
+ * @bus: points to the mii_bus structure
+ * @phyaddr: MII addr
+ * @phyreg: MII reg
+ * @phydata: phy data
+ * Description: it writes the data into the MII register from within the device.
+ */
+static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
+ u16 phydata)
+{
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+ unsigned int mii_data = priv->hw->mii.data;
+ int ret, data = phydata;
+ u32 value = MII_BUSY;
+ u32 v;
+
+ ret = pm_runtime_resume_and_get(priv->device);
+ if (ret < 0)
+ return ret;
+
+ value |= (phyaddr << priv->hw->mii.addr_shift)
+ & priv->hw->mii.addr_mask;
+ value |= (phyreg << priv->hw->mii.reg_shift) & priv->hw->mii.reg_mask;
+
+ value |= (priv->clk_csr << priv->hw->mii.clk_csr_shift)
+ & priv->hw->mii.clk_csr_mask;
+ if (priv->plat->has_gmac4) {
+ value |= MII_GMAC4_WRITE;
+ if (phyreg & MII_ADDR_C45) {
+ value |= MII_GMAC4_C45E;
+ value &= ~priv->hw->mii.reg_mask;
+ value |= ((phyreg >> MII_DEVADDR_C45_SHIFT) <<
+ priv->hw->mii.reg_shift) &
+ priv->hw->mii.reg_mask;
+
+ data |= (phyreg & MII_REGADDR_C45_MASK) <<
+ MII_GMAC4_REG_ADDR_SHIFT;
+ }
+ } else {
+ value |= MII_WRITE;
+ }
+
+ /* Wait until any existing MII operation is complete */
+ if (readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000)) {
+ ret = -EBUSY;
+ goto err_disable_clks;
+ }
+
+ /* Set the MII address register to write */
+ writel(data, priv->ioaddr + mii_data);
+ writel(value, priv->ioaddr + mii_address);
+
+ /* Wait until any existing MII operation is complete */
+ ret = readl_poll_timeout(priv->ioaddr + mii_address, v, !(v & MII_BUSY),
+ 100, 10000);
+
+err_disable_clks:
+ pm_runtime_put(priv->device);
+
+ return ret;
+}
+
+/**
+ * stmmac_mdio_reset
+ * @bus: points to the mii_bus structure
+ * Description: reset the MII bus
+ */
+int stmmac_mdio_reset(struct mii_bus *bus)
+{
+#if IS_ENABLED(CONFIG_STMMAC_PLATFORM)
+ struct net_device *ndev = bus->priv;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ unsigned int mii_address = priv->hw->mii.addr;
+
+#ifdef CONFIG_OF
+ if (priv->device->of_node) {
+ struct gpio_desc *reset_gpio;
+ u32 delays[3] = { 0, 0, 0 };
+
+ reset_gpio = devm_gpiod_get_optional(priv->device,
+ "snps,reset",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(reset_gpio))
+ return PTR_ERR(reset_gpio);
+
+ device_property_read_u32_array(priv->device,
+ "snps,reset-delays-us",
+ delays, ARRAY_SIZE(delays));
+
+ if (delays[0])
+ msleep(DIV_ROUND_UP(delays[0], 1000));
+
+ gpiod_set_value_cansleep(reset_gpio, 1);
+ if (delays[1])
+ msleep(DIV_ROUND_UP(delays[1], 1000));
+
+ gpiod_set_value_cansleep(reset_gpio, 0);
+ if (delays[2])
+ msleep(DIV_ROUND_UP(delays[2], 1000));
+ }
+#endif
+
+ /* This is a workaround for problems with the STE101P PHY.
+ * It doesn't complete its reset until at least one clock cycle
+ * on MDC, so perform a dummy mdio read. To be updated for GMAC4
+ * if needed.
+ */
+ if (!priv->plat->has_gmac4)
+ writel(0, priv->ioaddr + mii_address);
+#endif
+ return 0;
+}
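+
+/* A minimal DT sketch for the optional MDIO reset handled above (property
+ * names as consumed by this driver; pin and delay values are examples
+ * only):
+ *
+ *   snps,reset-gpios = <&gpio0 7 GPIO_ACTIVE_LOW>;
+ *   snps,reset-delays-us = <0 10000 1000000>;
+ *
+ * The three delays (in microseconds) are applied before asserting,
+ * while asserting, and after deasserting the reset line, respectively.
+ */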
+
+int stmmac_xpcs_setup(struct mii_bus *bus)
+{
+ struct net_device *ndev = bus->priv;
+ struct mdio_device *mdiodev;
+ struct stmmac_priv *priv;
+ struct dw_xpcs *xpcs;
+ int mode, addr;
+
+ priv = netdev_priv(ndev);
+ mode = priv->plat->phy_interface;
+
+ /* Try to probe the XPCS by scanning all addresses. */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ mdiodev = mdio_device_create(bus, addr);
+ if (IS_ERR(mdiodev))
+ continue;
+
+ xpcs = xpcs_create(mdiodev, mode);
+ if (IS_ERR_OR_NULL(xpcs)) {
+ mdio_device_free(mdiodev);
+ continue;
+ }
+
+ priv->hw->xpcs = xpcs;
+ break;
+ }
+
+ if (!priv->hw->xpcs) {
+ dev_warn(priv->device, "No xPCS found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+/**
+ * stmmac_mdio_register
+ * @ndev: net device structure
+ * Description: it registers the MII bus
+ */
+int stmmac_mdio_register(struct net_device *ndev)
+{
+ int err = 0;
+ struct mii_bus *new_bus;
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node);
+ struct stmmac_mdio_bus_data *mdio_bus_data = priv->plat->mdio_bus_data;
+ struct device_node *mdio_node = priv->plat->mdio_node;
+ struct device *dev = ndev->dev.parent;
+ struct fwnode_handle *fixed_node;
+ int addr, found, max_addr;
+
+ if (!mdio_bus_data)
+ return 0;
+
+ new_bus = mdiobus_alloc();
+ if (!new_bus)
+ return -ENOMEM;
+
+ if (mdio_bus_data->irqs)
+ memcpy(new_bus->irq, mdio_bus_data->irqs, sizeof(new_bus->irq));
+
+ new_bus->name = "stmmac";
+
+ if (priv->plat->has_gmac4)
+ new_bus->probe_capabilities = MDIOBUS_C22_C45;
+
+ if (priv->plat->has_xgmac) {
+ new_bus->read = &stmmac_xgmac2_mdio_read;
+ new_bus->write = &stmmac_xgmac2_mdio_write;
+
+ /* Right now only C22 phys are supported */
+ max_addr = MII_XGMAC_MAX_C22ADDR + 1;
+
+ /* Check if DT specified an unsupported phy addr */
+ if (priv->plat->phy_addr > MII_XGMAC_MAX_C22ADDR)
+ dev_err(dev, "Unsupported phy_addr (max=%d)\n",
+ MII_XGMAC_MAX_C22ADDR);
+ } else {
+ new_bus->read = &stmmac_mdio_read;
+ new_bus->write = &stmmac_mdio_write;
+ max_addr = PHY_MAX_ADDR;
+ }
+
+ if (mdio_bus_data->needs_reset)
+ new_bus->reset = &stmmac_mdio_reset;
+
+ snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ new_bus->name, priv->plat->bus_id);
+ new_bus->priv = ndev;
+ new_bus->phy_mask = mdio_bus_data->phy_mask;
+ new_bus->parent = priv->device;
+
+ err = of_mdiobus_register(new_bus, mdio_node);
+ if (err == -ENODEV) {
+ err = 0;
+ dev_info(dev, "MDIO bus is disabled\n");
+ goto bus_register_fail;
+ } else if (err) {
+ dev_err_probe(dev, err, "Cannot register the MDIO bus\n");
+ goto bus_register_fail;
+ }
+
+ /* Looks like we need a dummy read for XGMAC only, using a C45 access */
+ if (priv->plat->has_xgmac)
+ stmmac_xgmac2_mdio_read(new_bus, 0, MII_ADDR_C45);
+
+ /* If fixed-link is set, skip PHY scanning */
+ if (!fwnode)
+ fwnode = dev_fwnode(priv->device);
+
+ if (fwnode) {
+ fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link");
+ if (fixed_node) {
+ fwnode_handle_put(fixed_node);
+ goto bus_register_done;
+ }
+ }
+
+ if (priv->plat->phy_node || mdio_node)
+ goto bus_register_done;
+
+ found = 0;
+ for (addr = 0; addr < max_addr; addr++) {
+ struct phy_device *phydev = mdiobus_get_phy(new_bus, addr);
+
+ if (!phydev)
+ continue;
+
+ /*
+ * If an IRQ was provided to be assigned after
+ * the bus probe, do it here.
+ */
+ if (!mdio_bus_data->irqs &&
+ (mdio_bus_data->probed_phy_irq > 0)) {
+ new_bus->irq[addr] = mdio_bus_data->probed_phy_irq;
+ phydev->irq = mdio_bus_data->probed_phy_irq;
+ }
+
+ /*
+ * If we're going to bind the MAC to this PHY bus,
+ * and no PHY number was provided to the MAC,
+ * use the one probed here.
+ */
+ if (priv->plat->phy_addr == -1)
+ priv->plat->phy_addr = addr;
+
+ phy_attached_info(phydev);
+ found = 1;
+ }
+
+ if (!found && !mdio_node) {
+ dev_warn(dev, "No PHY found\n");
+ err = -ENODEV;
+ goto no_phy_found;
+ }
+
+bus_register_done:
+ priv->mii = new_bus;
+
+ return 0;
+
+no_phy_found:
+ mdiobus_unregister(new_bus);
+bus_register_fail:
+ mdiobus_free(new_bus);
+ return err;
+}
+
+/**
+ * stmmac_mdio_unregister
+ * @ndev: net device structure
+ * Description: it unregisters the MII bus
+ */
+int stmmac_mdio_unregister(struct net_device *ndev)
+{
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ if (!priv->mii)
+ return 0;
+
+ if (priv->hw->xpcs) {
+ mdio_device_free(priv->hw->xpcs->mdiodev);
+ xpcs_destroy(priv->hw->xpcs);
+ }
+
+ mdiobus_unregister(priv->mii);
+ priv->mii->priv = NULL;
+ mdiobus_free(priv->mii);
+ priv->mii = NULL;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
new file mode 100644
index 000000000..644bb54f5
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -0,0 +1,313 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This contains the functions to handle the pci driver.
+
+ Copyright (C) 2011-2012 Vayavya Labs Pvt Ltd
+
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/clk-provider.h>
+#include <linux/pci.h>
+#include <linux/dmi.h>
+
+#include "stmmac.h"
+
+struct stmmac_pci_info {
+ int (*setup)(struct pci_dev *pdev, struct plat_stmmacenet_data *plat);
+};
+
+static void common_default_data(struct plat_stmmacenet_data *plat)
+{
+ plat->clk_csr = 2; /* clk_csr_i = 20-35MHz & MDC = clk_csr_i/16 */
+ plat->has_gmac = 1;
+ plat->force_sf_dma_mode = 1;
+
+ plat->mdio_bus_data->needs_reset = true;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 1;
+ plat->rx_queues_to_use = 1;
+
+ /* Disable Priority config by default */
+ plat->tx_queues_cfg[0].use_prio = false;
+ plat->rx_queues_cfg[0].use_prio = false;
+
+ /* Disable RX queues routing by default */
+ plat->rx_queues_cfg[0].pkt_route = 0x0;
+}
+
+static int stmmac_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ /* Set common default data first */
+ common_default_data(plat);
+
+ plat->bus_id = 1;
+ plat->phy_addr = 0;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+ /* TODO: AXI */
+
+ return 0;
+}
+
+static const struct stmmac_pci_info stmmac_pci_info = {
+ .setup = stmmac_default_data,
+};
+
+static int snps_gmac5_default_data(struct pci_dev *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ int i;
+
+ plat->clk_csr = 5;
+ plat->has_gmac4 = 1;
+ plat->force_sf_dma_mode = 1;
+ plat->tso_en = 1;
+ plat->pmt = 1;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /* Set the maxmtu to a default of JUMBO_LEN */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default number of RX and TX queues to use */
+ plat->tx_queues_to_use = 4;
+ plat->rx_queues_to_use = 4;
+
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ for (i = 0; i < plat->tx_queues_to_use; i++) {
+ plat->tx_queues_cfg[i].use_prio = false;
+ plat->tx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->tx_queues_cfg[i].weight = 25;
+ if (i > 0)
+ plat->tx_queues_cfg[i].tbs_en = 1;
+ }
+
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+ for (i = 0; i < plat->rx_queues_to_use; i++) {
+ plat->rx_queues_cfg[i].use_prio = false;
+ plat->rx_queues_cfg[i].mode_to_use = MTL_QUEUE_DCB;
+ plat->rx_queues_cfg[i].pkt_route = 0x0;
+ plat->rx_queues_cfg[i].chan = i;
+ }
+
+ plat->bus_id = 1;
+ plat->phy_addr = -1;
+ plat->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ plat->dma_cfg->pbl = 32;
+ plat->dma_cfg->pblx8 = true;
+
+ /* Axi Configuration */
+ plat->axi = devm_kzalloc(&pdev->dev, sizeof(*plat->axi), GFP_KERNEL);
+ if (!plat->axi)
+ return -ENOMEM;
+
+ plat->axi->axi_wr_osr_lmt = 31;
+ plat->axi->axi_rd_osr_lmt = 31;
+
+ plat->axi->axi_fb = false;
+ plat->axi->axi_blen[0] = 4;
+ plat->axi->axi_blen[1] = 8;
+ plat->axi->axi_blen[2] = 16;
+ plat->axi->axi_blen[3] = 32;
+
+ return 0;
+}
+
+static const struct stmmac_pci_info snps_gmac5_pci_info = {
+ .setup = snps_gmac5_default_data,
+};
+
+/**
+ * stmmac_pci_probe
+ *
+ * @pdev: pci device pointer
+ * @id: pointer to table of device id/id's.
+ *
+ * Description: This probing function gets called for all PCI devices which
+ * match the ID table and are not "owned" by another driver yet. This function
+ * gets passed a "struct pci_dev *" for each device whose entry in the ID table
+ * matches the device. The probe function returns zero when the driver chooses
+ * to take "ownership" of the device, or a negative error code otherwise.
+ */
+static int stmmac_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ struct stmmac_pci_info *info = (struct stmmac_pci_info *)id->driver_data;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_resources res;
+ int i;
+ int ret;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+
+ plat->mdio_bus_data = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg),
+ GFP_KERNEL);
+ if (!plat->dma_cfg)
+ return -ENOMEM;
+
+ plat->safety_feat_cfg = devm_kzalloc(&pdev->dev,
+ sizeof(*plat->safety_feat_cfg),
+ GFP_KERNEL);
+ if (!plat->safety_feat_cfg)
+ return -ENOMEM;
+
+ /* Enable pci device */
+ ret = pcim_enable_device(pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n",
+ __func__);
+ return ret;
+ }
+
+ /* Get the base address of device */
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ ret = pcim_iomap_regions(pdev, BIT(i), pci_name(pdev));
+ if (ret)
+ return ret;
+ break;
+ }
+
+ pci_set_master(pdev);
+
+ ret = info->setup(pdev, plat);
+ if (ret)
+ return ret;
+
+ memset(&res, 0, sizeof(res));
+ res.addr = pcim_iomap_table(pdev)[i];
+ res.wol_irq = pdev->irq;
+ res.irq = pdev->irq;
+
+ plat->safety_feat_cfg->tsoee = 1;
+ plat->safety_feat_cfg->mrxpee = 1;
+ plat->safety_feat_cfg->mestee = 1;
+ plat->safety_feat_cfg->mrxee = 1;
+ plat->safety_feat_cfg->mtxee = 1;
+ plat->safety_feat_cfg->epsi = 1;
+ plat->safety_feat_cfg->edpp = 1;
+ plat->safety_feat_cfg->prtyen = 1;
+ plat->safety_feat_cfg->tmouten = 1;
+
+ return stmmac_dvr_probe(&pdev->dev, plat, &res);
+}
+
+/**
+ * stmmac_pci_remove
+ *
+ * @pdev: pci device pointer
+ * Description: this function calls the main driver to free the net resources
+ * and releases the PCI resources.
+ */
+static void stmmac_pci_remove(struct pci_dev *pdev)
+{
+ int i;
+
+ stmmac_dvr_remove(&pdev->dev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (pci_resource_len(pdev, i) == 0)
+ continue;
+ pcim_iounmap_regions(pdev, BIT(i));
+ break;
+ }
+}
+
+static int __maybe_unused stmmac_pci_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ ret = stmmac_suspend(dev);
+ if (ret)
+ return ret;
+
+ ret = pci_save_state(pdev);
+ if (ret)
+ return ret;
+
+ pci_disable_device(pdev);
+ pci_wake_from_d3(pdev, true);
+ return 0;
+}
+
+static int __maybe_unused stmmac_pci_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ int ret;
+
+ pci_restore_state(pdev);
+ pci_set_power_state(pdev, PCI_D0);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ pci_set_master(pdev);
+
+ return stmmac_resume(dev);
+}
+
+static SIMPLE_DEV_PM_OPS(stmmac_pm_ops, stmmac_pci_suspend, stmmac_pci_resume);
+
+/* synthetic ID, no official vendor */
+#define PCI_VENDOR_ID_STMMAC 0x0700
+
+#define PCI_DEVICE_ID_STMMAC_STMMAC 0x1108
+#define PCI_DEVICE_ID_SYNOPSYS_GMAC5_ID 0x7102
+
+static const struct pci_device_id stmmac_id_table[] = {
+ { PCI_DEVICE_DATA(STMMAC, STMMAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(STMICRO, MAC, &stmmac_pci_info) },
+ { PCI_DEVICE_DATA(SYNOPSYS, GMAC5_ID, &snps_gmac5_pci_info) },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, stmmac_id_table);
+
+static struct pci_driver stmmac_pci_driver = {
+ .name = STMMAC_RESOURCE_NAME,
+ .id_table = stmmac_id_table,
+ .probe = stmmac_pci_probe,
+ .remove = stmmac_pci_remove,
+ .driver = {
+ .pm = &stmmac_pm_ops,
+ },
+};
+
+module_pci_driver(stmmac_pci_driver);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
+MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
new file mode 100644
index 000000000..aefc12146
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * stmmac_pcs.h: Physical Coding Sublayer Header File
+ *
+ * Copyright (C) 2016 STMicroelectronics (R&D) Limited
+ * Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+ */
+
+#ifndef __STMMAC_PCS_H__
+#define __STMMAC_PCS_H__
+
+#include <linux/slab.h>
+#include <linux/io.h>
+#include "common.h"
+
+/* PCS registers (AN/TBI/SGMII/RGMII) offsets */
+#define GMAC_AN_CTRL(x) (x) /* AN control */
+#define GMAC_AN_STATUS(x) (x + 0x4) /* AN status */
+#define GMAC_ANE_ADV(x) (x + 0x8) /* ANE Advertisement */
+#define GMAC_ANE_LPA(x) (x + 0xc) /* ANE link partner ability */
+#define GMAC_ANE_EXP(x) (x + 0x10) /* ANE expansion */
+#define GMAC_TBI(x) (x + 0x14) /* TBI extend status */
+
+/* AN Configuration defines */
+#define GMAC_AN_CTRL_RAN BIT(9) /* Restart Auto-Negotiation */
+#define GMAC_AN_CTRL_ANE BIT(12) /* Auto-Negotiation Enable */
+#define GMAC_AN_CTRL_ELE BIT(14) /* External Loopback Enable */
+#define GMAC_AN_CTRL_ECD BIT(16) /* Enable Comma Detect */
+#define GMAC_AN_CTRL_LR BIT(17) /* Lock to Reference */
+#define GMAC_AN_CTRL_SGMRAL BIT(18) /* SGMII RAL Control */
+
+/* AN Status defines */
+#define GMAC_AN_STATUS_LS BIT(2) /* Link Status 0:down 1:up */
+#define GMAC_AN_STATUS_ANA BIT(3) /* Auto-Negotiation Ability */
+#define GMAC_AN_STATUS_ANC BIT(5) /* Auto-Negotiation Complete */
+#define GMAC_AN_STATUS_ES BIT(8) /* Extended Status */
+
+/* ADV and LPA defines */
+#define GMAC_ANE_FD BIT(5)
+#define GMAC_ANE_HD BIT(6)
+#define GMAC_ANE_PSE GENMASK(8, 7)
+#define GMAC_ANE_PSE_SHIFT 7
+#define GMAC_ANE_RFE GENMASK(13, 12)
+#define GMAC_ANE_RFE_SHIFT 12
+#define GMAC_ANE_ACK BIT(14)
+
+/**
+ * dwmac_pcs_isr - TBI, RTBI, or SGMII PHY ISR
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @intr_status: GMAC core interrupt status
+ * @x: pointer to log these events as stats
+ * Description: it is the ISR for PCS events: Auto-Negotiation Completed and
+ * Link status.
+ */
+static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg,
+ unsigned int intr_status,
+ struct stmmac_extra_stats *x)
+{
+ u32 val = readl(ioaddr + GMAC_AN_STATUS(reg));
+
+ if (intr_status & PCS_ANE_IRQ) {
+ x->irq_pcs_ane_n++;
+ if (val & GMAC_AN_STATUS_ANC)
+ pr_info("stmmac_pcs: ANE process completed\n");
+ }
+
+ if (intr_status & PCS_LINK_IRQ) {
+ x->irq_pcs_link_n++;
+ if (val & GMAC_AN_STATUS_LS)
+ pr_info("stmmac_pcs: Link Up\n");
+ else
+ pr_info("stmmac_pcs: Link Down\n");
+ }
+}
+
+/**
+ * dwmac_rane - To restart ANE
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @restart: to restart ANE
+ * Description: this simply restarts the Auto-Negotiation if @restart is set.
+ */
+static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ if (restart)
+ value |= GMAC_AN_CTRL_RAN;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
+
+/**
+ * dwmac_ctrl_ane - To program the AN Control Register.
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @ane: to enable the auto-negotiation
+ * @srgmi_ral: to manage MAC-2-MAC SGMII connections.
+ * @loopback: to cause the PHY to loopback tx data into rx path.
+ * Description: this is the main function to configure the AN control register
+ * and init the ANE, select loopback (usually for debugging purposes) and
+ * configure SGMII RAL.
+ */
+static inline void dwmac_ctrl_ane(void __iomem *ioaddr, u32 reg, bool ane,
+ bool srgmi_ral, bool loopback)
+{
+ u32 value = readl(ioaddr + GMAC_AN_CTRL(reg));
+
+ /* Enable and restart the Auto-Negotiation */
+ if (ane)
+ value |= GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_RAN;
+
+ /* In case of MAC-2-MAC connection, block is configured to operate
+ * according to MAC conf register.
+ */
+ if (srgmi_ral)
+ value |= GMAC_AN_CTRL_SGMRAL;
+
+ if (loopback)
+ value |= GMAC_AN_CTRL_ELE;
+
+ writel(value, ioaddr + GMAC_AN_CTRL(reg));
+}
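+
+/* Usage sketch (illustrative, not tied to a specific caller): a GMAC core
+ * enabling and restarting ANE, with no SGMII RAL and no loopback, would do
+ *
+ *   dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, true, false, false);
+ *
+ * where GMAC_PCS_BASE is the core-specific offset of the AN register block.
+ */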
+
+/**
+ * dwmac_get_adv_lp - Get ADV and LP cap
+ * @ioaddr: IO registers pointer
+ * @reg: Base address of the AN Control Register.
+ * @adv_lp: structure to store the adv,lp status
+ * Description: this is to expose the ANE advertisement and Link partner ability
+ * status to ethtool support.
+ */
+static inline void dwmac_get_adv_lp(void __iomem *ioaddr, u32 reg,
+ struct rgmii_adv *adv_lp)
+{
+ u32 value = readl(ioaddr + GMAC_ANE_ADV(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->duplex |= DUPLEX_HALF;
+
+ adv_lp->pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+
+ value = readl(ioaddr + GMAC_ANE_LPA(reg));
+
+ if (value & GMAC_ANE_FD)
+ adv_lp->lp_duplex = DUPLEX_FULL;
+ if (value & GMAC_ANE_HD)
+ adv_lp->lp_duplex = DUPLEX_HALF;
+
+ adv_lp->lp_pause = (value & GMAC_ANE_PSE) >> GMAC_ANE_PSE_SHIFT;
+}
+#endif /* __STMMAC_PCS_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
new file mode 100644
index 000000000..0046a4ee6
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -0,0 +1,840 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ This contains the functions to handle the platform driver.
+
+ Copyright (C) 2007-2011 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+
+#include "stmmac.h"
+#include "stmmac_platform.h"
+
+#ifdef CONFIG_OF
+
+/**
+ * dwmac1000_validate_mcast_bins - validates the number of Multicast filter bins
+ * @dev: struct device of the platform device
+ * @mcast_bins: Multicast filtering bins
+ * Description:
+ * this function validates the number of Multicast filtering bins specified
+ * by the configuration through the device tree. The Synopsys GMAC supports
+ * 64 bins, 128 bins, or 256 bins. "bins" refer to the division of CRC
+ * number space. 64 bins correspond to 6 bits of the CRC, 128 corresponds
+ * to 7 bits, and 256 refers to 8 bits of the CRC. Any other setting is
+ * invalid and will cause the filtering algorithm to use Multicast
+ * promiscuous mode.
+ */
+static int dwmac1000_validate_mcast_bins(struct device *dev, int mcast_bins)
+{
+ int x = mcast_bins;
+
+ switch (x) {
+ case HASH_TABLE_SIZE:
+ case 128:
+ case 256:
+ break;
+ default:
+ x = 0;
+ dev_info(dev, "Hash table entries set to unexpected value %d\n",
+ mcast_bins);
+ break;
+ }
+ return x;
+}
+
+/**
+ * dwmac1000_validate_ucast_entries - validate the Unicast address entries
+ * @dev: struct device of the platform device
+ * @ucast_entries: number of Unicast address entries
+ * Description:
+ * This function validates the number of Unicast address entries supported
+ * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
+ * supports 1..32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * logic. This function validates a valid, supported configuration is
+ * selected, and defaults to 1 Unicast address if an unsupported
+ * configuration is selected.
+ */
+static int dwmac1000_validate_ucast_entries(struct device *dev,
+ int ucast_entries)
+{
+ int x = ucast_entries;
+
+ switch (x) {
+ case 1 ... 32:
+ case 64:
+ case 128:
+ break;
+ default:
+ x = 1;
+ dev_info(dev, "Unicast table entries set to unexpected value %d\n",
+ ucast_entries);
+ break;
+ }
+ return x;
+}
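+
+/* The two validators above sanity-check DT values such as (illustrative):
+ *
+ *   snps,multicast-filter-bins = <256>;
+ *   snps,perfect-filter-entries = <128>;
+ */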
+
+/**
+ * stmmac_axi_setup - parse DT parameters for programming the AXI register
+ * @pdev: platform device
+ * Description:
+ * if required, the AXI internal register can be tuned from the device
+ * tree by using platform parameters.
+ */
+static struct stmmac_axi *stmmac_axi_setup(struct platform_device *pdev)
+{
+ struct device_node *np;
+ struct stmmac_axi *axi;
+
+ np = of_parse_phandle(pdev->dev.of_node, "snps,axi-config", 0);
+ if (!np)
+ return NULL;
+
+ axi = devm_kzalloc(&pdev->dev, sizeof(*axi), GFP_KERNEL);
+ if (!axi) {
+ of_node_put(np);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ axi->axi_lpi_en = of_property_read_bool(np, "snps,lpi_en");
+ axi->axi_xit_frm = of_property_read_bool(np, "snps,xit_frm");
+ axi->axi_kbbe = of_property_read_bool(np, "snps,kbbe");
+ axi->axi_fb = of_property_read_bool(np, "snps,fb");
+ axi->axi_mb = of_property_read_bool(np, "snps,mb");
+ axi->axi_rb = of_property_read_bool(np, "snps,rb");
+
+ if (of_property_read_u32(np, "snps,wr_osr_lmt", &axi->axi_wr_osr_lmt))
+ axi->axi_wr_osr_lmt = 1;
+ if (of_property_read_u32(np, "snps,rd_osr_lmt", &axi->axi_rd_osr_lmt))
+ axi->axi_rd_osr_lmt = 1;
+ of_property_read_u32_array(np, "snps,blen", axi->axi_blen, AXI_BLEN);
+ of_node_put(np);
+
+ return axi;
+}
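+
+/* A hypothetical "snps,axi-config" node consumed above (all properties
+ * are optional; values are illustrative only):
+ *
+ *   stmmac_axi_setup: stmmac-axi-config {
+ *           snps,wr_osr_lmt = <0xf>;
+ *           snps,rd_osr_lmt = <0xf>;
+ *           snps,blen = <256 128 64 32 0 0 0>;
+ *   };
+ */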
+
+/**
+ * stmmac_mtl_setup - parse DT parameters for multiple queues configuration
+ * @pdev: platform device
+ * @plat: enet data
+ */
+static int stmmac_mtl_setup(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ struct device_node *q_node;
+ struct device_node *rx_node;
+ struct device_node *tx_node;
+ u8 queue = 0;
+ int ret = 0;
+
+ /* For backwards-compatibility with device trees that don't have any
+ * snps,mtl-rx-config or snps,mtl-tx-config properties, we fall back
+ * to one RX and one TX queue.
+ */
+ plat->rx_queues_to_use = 1;
+ plat->tx_queues_to_use = 1;
+
+ /* First Queue must always be in DCB mode. As MTL_QUEUE_DCB = 1 we need
+ * to always set this, otherwise Queue will be classified as AVB
+ * (because MTL_QUEUE_AVB = 0).
+ */
+ plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+ plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
+
+ rx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-rx-config", 0);
+ if (!rx_node)
+ return ret;
+
+ tx_node = of_parse_phandle(pdev->dev.of_node, "snps,mtl-tx-config", 0);
+ if (!tx_node) {
+ of_node_put(rx_node);
+ return ret;
+ }
+
+ /* Processing RX queues common config */
+ if (of_property_read_u32(rx_node, "snps,rx-queues-to-use",
+ &plat->rx_queues_to_use))
+ plat->rx_queues_to_use = 1;
+
+ if (of_property_read_bool(rx_node, "snps,rx-sched-sp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+ else if (of_property_read_bool(rx_node, "snps,rx-sched-wsp"))
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_WSP;
+ else
+ plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;
+
+ /* Processing individual RX queue config */
+ for_each_child_of_node(rx_node, q_node) {
+ if (queue >= plat->rx_queues_to_use)
+ break;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ else if (of_property_read_bool(q_node, "snps,avb-algorithm"))
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ else
+ plat->rx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+
+ if (of_property_read_u32(q_node, "snps,map-to-dma-channel",
+ &plat->rx_queues_cfg[queue].chan))
+ plat->rx_queues_cfg[queue].chan = queue;
+ /* TODO: Dynamic mapping to be included in the future */
+
+ if (of_property_read_u32(q_node, "snps,priority",
+ &plat->rx_queues_cfg[queue].prio)) {
+ plat->rx_queues_cfg[queue].prio = 0;
+ plat->rx_queues_cfg[queue].use_prio = false;
+ } else {
+ plat->rx_queues_cfg[queue].use_prio = true;
+ }
+
+ /* RX queue specific packet type routing */
+ if (of_property_read_bool(q_node, "snps,route-avcp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_AVCPQ;
+ else if (of_property_read_bool(q_node, "snps,route-ptp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_PTPQ;
+ else if (of_property_read_bool(q_node, "snps,route-dcbcp"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_DCBCPQ;
+ else if (of_property_read_bool(q_node, "snps,route-up"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_UPQ;
+ else if (of_property_read_bool(q_node, "snps,route-multi-broad"))
+ plat->rx_queues_cfg[queue].pkt_route = PACKET_MCBCQ;
+ else
+ plat->rx_queues_cfg[queue].pkt_route = 0x0;
+
+ queue++;
+ }
+ if (queue != plat->rx_queues_to_use) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Not all RX queues were configured\n");
+ goto out;
+ }
+
+ /* Processing TX queues common config */
+ if (of_property_read_u32(tx_node, "snps,tx-queues-to-use",
+ &plat->tx_queues_to_use))
+ plat->tx_queues_to_use = 1;
+
+ if (of_property_read_bool(tx_node, "snps,tx-sched-wrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-wfq"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WFQ;
+ else if (of_property_read_bool(tx_node, "snps,tx-sched-dwrr"))
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_DWRR;
+ else
+ plat->tx_sched_algorithm = MTL_TX_ALGORITHM_SP;
+
+ queue = 0;
+
+ /* Processing individual TX queue config */
+ for_each_child_of_node(tx_node, q_node) {
+ if (queue >= plat->tx_queues_to_use)
+ break;
+
+ if (of_property_read_u32(q_node, "snps,weight",
+ &plat->tx_queues_cfg[queue].weight))
+ plat->tx_queues_cfg[queue].weight = 0x10 + queue;
+
+ if (of_property_read_bool(q_node, "snps,dcb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ } else if (of_property_read_bool(q_node,
+ "snps,avb-algorithm")) {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+
+ /* Credit Base Shaper parameters used by AVB */
+ if (of_property_read_u32(q_node, "snps,send_slope",
+ &plat->tx_queues_cfg[queue].send_slope))
+ plat->tx_queues_cfg[queue].send_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,idle_slope",
+ &plat->tx_queues_cfg[queue].idle_slope))
+ plat->tx_queues_cfg[queue].idle_slope = 0x0;
+ if (of_property_read_u32(q_node, "snps,high_credit",
+ &plat->tx_queues_cfg[queue].high_credit))
+ plat->tx_queues_cfg[queue].high_credit = 0x0;
+ if (of_property_read_u32(q_node, "snps,low_credit",
+ &plat->tx_queues_cfg[queue].low_credit))
+ plat->tx_queues_cfg[queue].low_credit = 0x0;
+ } else {
+ plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
+
+ if (of_property_read_u32(q_node, "snps,priority",
+ &plat->tx_queues_cfg[queue].prio)) {
+ plat->tx_queues_cfg[queue].prio = 0;
+ plat->tx_queues_cfg[queue].use_prio = false;
+ } else {
+ plat->tx_queues_cfg[queue].use_prio = true;
+ }
+
+ queue++;
+ }
+ if (queue != plat->tx_queues_to_use) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "Not all TX queues were configured\n");
+ goto out;
+ }
+
+out:
+ of_node_put(rx_node);
+ of_node_put(tx_node);
+ of_node_put(q_node);
+
+ return ret;
+}
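+
+/* A minimal DT sketch of the RX/TX MTL configuration parsed above
+ * (illustrative values; every property shown is read by this function):
+ *
+ *   mtl_rx_setup: rx-queues-config {
+ *           snps,rx-queues-to-use = <2>;
+ *           snps,rx-sched-sp;
+ *           queue0 { snps,dcb-algorithm; snps,map-to-dma-channel = <0>; };
+ *           queue1 { snps,dcb-algorithm; snps,map-to-dma-channel = <1>; };
+ *   };
+ *
+ *   mtl_tx_setup: tx-queues-config {
+ *           snps,tx-queues-to-use = <2>;
+ *           snps,tx-sched-wrr;
+ *           queue0 { snps,weight = <0x10>; snps,dcb-algorithm; };
+ *           queue1 { snps,weight = <0x11>; snps,dcb-algorithm; };
+ *   };
+ */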
+
+/**
+ * stmmac_dt_phy - parse device-tree driver parameters to allocate PHY resources
+ * @plat: driver data platform structure
+ * @np: device tree node
+ * @dev: device pointer
+ * Description:
+ * The mdio bus will be allocated if a phy transceiver is on board;
+ * it will be NULL if fixed-link is configured.
+ * If the "snps,dwmac-mdio" sub-node is present, the mdio bus will be
+ * allocated in any case (for DSA, mdio must be registered even for
+ * fixed-link).
+ * The table below summarizes the supported configurations:
+ * -------------------------------
+ * snps,phy-addr | Y
+ * -------------------------------
+ * phy-handle | Y
+ * -------------------------------
+ * fixed-link | N
+ * -------------------------------
+ * snps,dwmac-mdio |
+ * even if | Y
+ * fixed-link |
+ * -------------------------------
+ *
+ * It returns 0 in case of success otherwise -ENODEV.
+ */
+static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
+ struct device_node *np, struct device *dev)
+{
+ bool mdio = !of_phy_is_fixed_link(np);
+ static const struct of_device_id need_mdio_ids[] = {
+ { .compatible = "snps,dwc-qos-ethernet-4.10" },
+ {},
+ };
+
+ if (of_match_node(need_mdio_ids, np)) {
+ plat->mdio_node = of_get_child_by_name(np, "mdio");
+ } else {
+ /* If snps,dwmac-mdio is passed from DT, always register
+ * the MDIO bus.
+ */
+ for_each_child_of_node(np, plat->mdio_node) {
+ if (of_device_is_compatible(plat->mdio_node,
+ "snps,dwmac-mdio"))
+ break;
+ }
+ }
+
+ if (plat->mdio_node) {
+ dev_dbg(dev, "Found MDIO subnode\n");
+ mdio = true;
+ }
+
+ if (mdio) {
+ plat->mdio_bus_data =
+ devm_kzalloc(dev, sizeof(struct stmmac_mdio_bus_data),
+ GFP_KERNEL);
+ if (!plat->mdio_bus_data)
+ return -ENOMEM;
+
+ plat->mdio_bus_data->needs_reset = true;
+ }
+
+ return 0;
+}
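+
+/* Sketch of the "snps,dwmac-mdio" sub-node matched above (illustrative):
+ *
+ *   mdio {
+ *           compatible = "snps,dwmac-mdio";
+ *           ethernet-phy@0 {
+ *                   reg = <0>;
+ *           };
+ *   };
+ */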
+
+/**
+ * stmmac_of_get_mac_mode - retrieves the interface of the MAC
+ * @np: - device-tree node
+ * Description:
+ * Similar to `of_get_phy_mode()`, this function will retrieve (from
+ * the device-tree) the interface mode on the MAC side. This assumes
+ * that there is a mode converter in-between the MAC & PHY
+ * (e.g. GMII-to-RGMII).
+ */
+static int stmmac_of_get_mac_mode(struct device_node *np)
+{
+ const char *pm;
+ int err, i;
+
+ err = of_property_read_string(np, "mac-mode", &pm);
+ if (err < 0)
+ return err;
+
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++) {
+ if (!strcasecmp(pm, phy_modes(i)))
+ return i;
+ }
+
+ return -ENODEV;
+}
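+
+/* Illustrative DT fragment: with a GMII-to-RGMII converter sitting between
+ * MAC and PHY, the MAC side runs GMII while the PHY side runs RGMII:
+ *
+ *   mac-mode = "gmii";
+ *   phy-mode = "rgmii";
+ */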
+
+/**
+ * stmmac_probe_config_dt - parse device-tree driver parameters
+ * @pdev: platform_device structure
+ * @mac: MAC address to use
+ * Description:
+ * this function reads the driver parameters from the device-tree and
+ * sets some private fields that will be used by the main driver at runtime.
+ */
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct plat_stmmacenet_data *plat;
+ struct stmmac_dma_cfg *dma_cfg;
+ int phy_mode;
+ void *ret;
+ int rc;
+
+ plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return ERR_PTR(-ENOMEM);
+
+ rc = of_get_mac_address(np, mac);
+ if (rc) {
+ if (rc == -EPROBE_DEFER)
+ return ERR_PTR(rc);
+
+ eth_zero_addr(mac);
+ }
+
+ phy_mode = device_get_phy_mode(&pdev->dev);
+ if (phy_mode < 0)
+ return ERR_PTR(phy_mode);
+
+ plat->phy_interface = phy_mode;
+ plat->interface = stmmac_of_get_mac_mode(np);
+ if (plat->interface < 0)
+ plat->interface = plat->phy_interface;
+
+ /* Some wrapper drivers still rely on phy_node. Let's save it while
+ * they are not converted to phylink.
+ */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* PHYLINK automatically parses the phy-handle property */
+ plat->phylink_node = np;
+
+ /* Get max speed of operation from device tree */
+ of_property_read_u32(np, "max-speed", &plat->max_speed);
+
+ plat->bus_id = of_alias_get_id(np, "ethernet");
+ if (plat->bus_id < 0)
+ plat->bus_id = 0;
+
+ /* Default to phy auto-detection */
+ plat->phy_addr = -1;
+
+ /* Default to get clk_csr from stmmac_clk_csr_set(),
+ * or get clk_csr from device tree.
+ */
+ plat->clk_csr = -1;
+ if (of_property_read_u32(np, "snps,clk-csr", &plat->clk_csr))
+ of_property_read_u32(np, "clk_csr", &plat->clk_csr);
+
+ /* "snps,phy-addr" is not a standard property. Mark it as deprecated
+ * and warn of its use. Remove this when phy node support is added.
+ */
+ if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
+ dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
+
+ /* To Configure PHY by using all device-tree supported properties */
+ rc = stmmac_dt_phy(plat, np, &pdev->dev);
+ if (rc)
+ return ERR_PTR(rc);
+
+ of_property_read_u32(np, "tx-fifo-depth", &plat->tx_fifo_size);
+
+ of_property_read_u32(np, "rx-fifo-depth", &plat->rx_fifo_size);
+
+ plat->force_sf_dma_mode =
+ of_property_read_bool(np, "snps,force_sf_dma_mode");
+
+ plat->en_tx_lpi_clockgating =
+ of_property_read_bool(np, "snps,en-tx-lpi-clockgating");
+
+ /* Set the maxmtu to a default of JUMBO_LEN in case the
+ * parameter is not present in the device tree.
+ */
+ plat->maxmtu = JUMBO_LEN;
+
+ /* Set default value for multicast hash bins */
+ plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+ /* Set default value for unicast filter entries */
+ plat->unicast_filter_entries = 1;
+
+ /*
+ * Currently only the properties needed on SPEAr600
+ * are provided. All other properties should be added
+ * once needed on other platforms.
+ */
+ if (of_device_is_compatible(np, "st,spear600-gmac") ||
+ of_device_is_compatible(np, "snps,dwmac-3.50a") ||
+ of_device_is_compatible(np, "snps,dwmac-3.70a") ||
+ of_device_is_compatible(np, "snps,dwmac")) {
+ /* Note that although the parameter is named max-frame-size
+ * in the ePAPR v1.1 spec, it is actually used as the IEEE
+ * definition of MAC Client data, i.e. the MTU. The ePAPR
+ * specification is confusing as the name is max-frame-size,
+ * but usage examples are clearly MTUs.
+ */
+ of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
+ of_property_read_u32(np, "snps,multicast-filter-bins",
+ &plat->multicast_filter_bins);
+ of_property_read_u32(np, "snps,perfect-filter-entries",
+ &plat->unicast_filter_entries);
+ plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
+ &pdev->dev, plat->unicast_filter_entries);
+ plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
+ &pdev->dev, plat->multicast_filter_bins);
+ plat->has_gmac = 1;
+ plat->pmt = 1;
+ }
+
+ if (of_device_is_compatible(np, "snps,dwmac-3.40a")) {
+ plat->has_gmac = 1;
+ plat->enh_desc = 1;
+ plat->tx_coe = 1;
+ plat->bugged_jumbo = 1;
+ plat->pmt = 1;
+ }
+
+ if (of_device_is_compatible(np, "snps,dwmac-4.00") ||
+ of_device_is_compatible(np, "snps,dwmac-4.10a") ||
+ of_device_is_compatible(np, "snps,dwmac-4.20a") ||
+ of_device_is_compatible(np, "snps,dwmac-5.10a")) {
+ plat->has_gmac4 = 1;
+ plat->has_gmac = 0;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
+ if (of_device_is_compatible(np, "snps,dwmac-3.610") ||
+ of_device_is_compatible(np, "snps,dwmac-3.710")) {
+ plat->enh_desc = 1;
+ plat->bugged_jumbo = 1;
+ plat->force_sf_dma_mode = 1;
+ }
+
+ if (of_device_is_compatible(np, "snps,dwxgmac")) {
+ plat->has_xgmac = 1;
+ plat->pmt = 1;
+ plat->tso_en = of_property_read_bool(np, "snps,tso");
+ }
+
+ dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
+ GFP_KERNEL);
+ if (!dma_cfg) {
+ stmmac_remove_config_dt(pdev, plat);
+ return ERR_PTR(-ENOMEM);
+ }
+ plat->dma_cfg = dma_cfg;
+
+ of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
+ if (!dma_cfg->pbl)
+ dma_cfg->pbl = DEFAULT_DMA_PBL;
+ of_property_read_u32(np, "snps,txpbl", &dma_cfg->txpbl);
+ of_property_read_u32(np, "snps,rxpbl", &dma_cfg->rxpbl);
+ dma_cfg->pblx8 = !of_property_read_bool(np, "snps,no-pbl-x8");
+
+ dma_cfg->aal = of_property_read_bool(np, "snps,aal");
+ dma_cfg->fixed_burst = of_property_read_bool(np, "snps,fixed-burst");
+ dma_cfg->mixed_burst = of_property_read_bool(np, "snps,mixed-burst");
+
+ plat->force_thresh_dma_mode = of_property_read_bool(np, "snps,force_thresh_dma_mode");
+ if (plat->force_thresh_dma_mode && plat->force_sf_dma_mode) {
+ plat->force_sf_dma_mode = 0;
+ dev_warn(&pdev->dev,
+ "force_sf_dma_mode is ignored if force_thresh_dma_mode is set.\n");
+ }
+
+ of_property_read_u32(np, "snps,ps-speed", &plat->mac_port_sel_speed);
+
+ plat->axi = stmmac_axi_setup(pdev);
+
+ rc = stmmac_mtl_setup(pdev, plat);
+ if (rc) {
+ stmmac_remove_config_dt(pdev, plat);
+ return ERR_PTR(rc);
+ }
+
+ /* clock setup */
+ if (!of_device_is_compatible(np, "snps,dwc-qos-ethernet-4.10")) {
+ plat->stmmac_clk = devm_clk_get(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_clk)) {
+ dev_warn(&pdev->dev, "Cannot get CSR clock\n");
+ plat->stmmac_clk = NULL;
+ }
+ clk_prepare_enable(plat->stmmac_clk);
+ }
+
+ plat->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
+ if (IS_ERR(plat->pclk)) {
+ ret = plat->pclk;
+ goto error_pclk_get;
+ }
+ clk_prepare_enable(plat->pclk);
+
+ /* Fall back to the main clock if no PTP ref clock is passed */
+ plat->clk_ptp_ref = devm_clk_get(&pdev->dev, "ptp_ref");
+ if (IS_ERR(plat->clk_ptp_ref)) {
+ plat->clk_ptp_rate = clk_get_rate(plat->stmmac_clk);
+ plat->clk_ptp_ref = NULL;
+ dev_info(&pdev->dev, "PTP uses main clock\n");
+ } else {
+ plat->clk_ptp_rate = clk_get_rate(plat->clk_ptp_ref);
+ dev_dbg(&pdev->dev, "PTP rate %d\n", plat->clk_ptp_rate);
+ }
+
+ plat->stmmac_rst = devm_reset_control_get_optional(&pdev->dev,
+ STMMAC_RESOURCE_NAME);
+ if (IS_ERR(plat->stmmac_rst)) {
+ ret = plat->stmmac_rst;
+ goto error_hw_init;
+ }
+
+ plat->stmmac_ahb_rst = devm_reset_control_get_optional_shared(
+ &pdev->dev, "ahb");
+ if (IS_ERR(plat->stmmac_ahb_rst)) {
+ ret = plat->stmmac_ahb_rst;
+ goto error_hw_init;
+ }
+
+ return plat;
+
+error_hw_init:
+ clk_disable_unprepare(plat->pclk);
+error_pclk_get:
+ clk_disable_unprepare(plat->stmmac_clk);
+
+ return ret;
+}
+
+/**
+ * stmmac_remove_config_dt - undo the effects of stmmac_probe_config_dt()
+ * @pdev: platform_device structure
+ * @plat: driver data platform structure
+ *
+ * Release resources claimed by stmmac_probe_config_dt().
+ */
+void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+ clk_disable_unprepare(plat->stmmac_clk);
+ clk_disable_unprepare(plat->pclk);
+ of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
+}
+#else
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat)
+{
+}
+#endif /* CONFIG_OF */
+EXPORT_SYMBOL_GPL(stmmac_probe_config_dt);
+EXPORT_SYMBOL_GPL(stmmac_remove_config_dt);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res)
+{
+ memset(stmmac_res, 0, sizeof(*stmmac_res));
+
+ /* Get IRQ information early to be able to ask for deferred
+ * probe if needed before we go too far with resource allocation.
+ */
+ stmmac_res->irq = platform_get_irq_byname(pdev, "macirq");
+ if (stmmac_res->irq < 0)
+ return stmmac_res->irq;
+
+ /* On some platforms, e.g. SPEAr, the wake-up irq differs from the mac
+ * irq. The external wake-up irq can be passed through the platform
+ * code, named "eth_wake_irq".
+ *
+ * If the wake-up interrupt is not passed from the platform,
+ * the driver will continue to use the mac irq (ndev->irq).
+ */
+ stmmac_res->wol_irq =
+ platform_get_irq_byname_optional(pdev, "eth_wake_irq");
+ if (stmmac_res->wol_irq < 0) {
+ if (stmmac_res->wol_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "IRQ eth_wake_irq not found\n");
+ stmmac_res->wol_irq = stmmac_res->irq;
+ }
+
+ stmmac_res->lpi_irq =
+ platform_get_irq_byname_optional(pdev, "eth_lpi");
+ if (stmmac_res->lpi_irq < 0) {
+ if (stmmac_res->lpi_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ dev_info(&pdev->dev, "IRQ eth_lpi not found\n");
+ }
+
+ stmmac_res->addr = devm_platform_ioremap_resource(pdev, 0);
+
+ return PTR_ERR_OR_ZERO(stmmac_res->addr);
+}
+EXPORT_SYMBOL_GPL(stmmac_get_platform_resources);
+
+/**
+ * stmmac_pltfr_remove
+ * @pdev: platform device pointer
+ * Description: this function calls the main driver to free the net resources
+ * and then calls the platform's exit hook to release the resources (e.g. mem).
+ */
+int stmmac_pltfr_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct plat_stmmacenet_data *plat = priv->plat;
+ int ret = stmmac_dvr_remove(&pdev->dev);
+
+ if (plat->exit)
+ plat->exit(pdev, plat->bsp_priv);
+
+ stmmac_remove_config_dt(pdev, plat);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(stmmac_pltfr_remove);
+
+/**
+ * stmmac_pltfr_suspend
+ * @dev: device pointer
+ * Description: this function is invoked when suspending the driver; it
+ * directly calls the main suspend function and then, if required on some
+ * platforms, it can call an exit helper.
+ */
+static int __maybe_unused stmmac_pltfr_suspend(struct device *dev)
+{
+ int ret;
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ ret = stmmac_suspend(dev);
+ if (priv->plat->exit)
+ priv->plat->exit(pdev, priv->plat->bsp_priv);
+
+ return ret;
+}
+
+/**
+ * stmmac_pltfr_resume
+ * @dev: device pointer
+ * Description: this function is invoked when resuming the driver; before
+ * calling the main resume function it can, on some platforms, call its own
+ * init helper if required.
+ */
+static int __maybe_unused stmmac_pltfr_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ struct platform_device *pdev = to_platform_device(dev);
+
+ if (priv->plat->init)
+ priv->plat->init(pdev, priv->plat->bsp_priv);
+
+ return stmmac_resume(dev);
+}
+
+static int __maybe_unused stmmac_runtime_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ stmmac_bus_clks_config(priv, false);
+
+ return 0;
+}
+
+static int __maybe_unused stmmac_runtime_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return stmmac_bus_clks_config(priv, true);
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* Disable the PTP ref clock if PMT-based wake-up is off */
+ clk_disable_unprepare(priv->plat->clk_ptp_ref);
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ if (!netif_running(ndev))
+ return 0;
+
+ if (!device_may_wakeup(priv->device) || !priv->plat->pmt) {
+ /* Re-enable the clocks that were previously disabled */
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
+ if (ret < 0) {
+ netdev_warn(priv->dev,
+ "failed to enable PTP reference clock: %pe\n",
+ ERR_PTR(ret));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+const struct dev_pm_ops stmmac_pltfr_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_suspend, stmmac_pltfr_resume)
+ SET_RUNTIME_PM_OPS(stmmac_runtime_suspend, stmmac_runtime_resume, NULL)
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(stmmac_pltfr_noirq_suspend, stmmac_pltfr_noirq_resume)
+};
+EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
+
+MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
+MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
new file mode 100644
index 000000000..3fff3f59d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*******************************************************************************
+ Copyright (C) 2007-2009 STMicroelectronics Ltd
+
+
+ Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
+*******************************************************************************/
+
+#ifndef __STMMAC_PLATFORM_H__
+#define __STMMAC_PLATFORM_H__
+
+#include "stmmac.h"
+
+struct plat_stmmacenet_data *
+stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac);
+void stmmac_remove_config_dt(struct platform_device *pdev,
+ struct plat_stmmacenet_data *plat);
+
+int stmmac_get_platform_resources(struct platform_device *pdev,
+ struct stmmac_resources *stmmac_res);
+
+int stmmac_pltfr_remove(struct platform_device *pdev);
+extern const struct dev_pm_ops stmmac_pltfr_pm_ops;
+
+static inline void *get_stmmac_bsp_priv(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+ struct stmmac_priv *priv = netdev_priv(ndev);
+
+ return priv->plat->bsp_priv;
+}
+
+#endif /* __STMMAC_PLATFORM_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
new file mode 100644
index 000000000..9c91a3dc8
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*******************************************************************************
+ PTP 1588 clock using the STMMAC.
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+*******************************************************************************/
+#include "stmmac.h"
+#include "stmmac_ptp.h"
+#include "dwmac4.h"
+
+/**
+ * stmmac_adjust_freq
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ppb: desired frequency change in parts per billion
+ *
+ * Description: this function will adjust the frequency of the hardware clock.
+ */
+static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 diff, addend;
+ int neg_adj = 0;
+ u64 adj;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ addend = priv->default_addend;
+ adj = addend;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
+ addend = neg_adj ? (addend - diff) : (addend + diff);
+
+ write_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_config_addend(priv, priv->ptpaddr, addend);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
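+
+/* A worked example of the fine-adjustment math above (illustrative values):
+ * with default_addend = 0x80000000 (2147483648) and ppb = 50000 (+50 ppm),
+ * diff = 2147483648 * 50000 / 1000000000 ~= 107374, so the new addend is
+ * 0x80000000 + 107374. A negative ppb subtracts the same diff instead.
+ */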
+
+/**
+ * stmmac_adjust_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @delta: desired change in nanoseconds
+ *
+ * Description: this function will shift/adjust the hardware clock time.
+ */
+static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u32 sec, nsec;
+ u32 quotient, remainder;
+ int neg_adj = 0;
+ bool xmac, est_rst = false;
+ int ret;
+
+ xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
+ quotient = div_u64_rem(delta, 1000000000ULL, &remainder);
+ sec = quotient;
+ nsec = remainder;
+
+ /* If EST is enabled, disable it before adjusting the PTP time. */
+ if (priv->plat->est && priv->plat->est->enable) {
+ est_rst = true;
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ }
+
+ write_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_adjust_systime(priv, priv->ptpaddr, sec, nsec, neg_adj, xmac);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ /* Calculate the new basetime and reconfigure EST after the PTP time adjustment. */
+ if (est_rst) {
+ struct timespec64 current_time, time;
+ ktime_t current_time_ns, basetime;
+ u64 cycle_time;
+
+ mutex_lock(&priv->plat->est->lock);
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time.tv_nsec = priv->plat->est->btr_reserve[0];
+ time.tv_sec = priv->plat->est->btr_reserve[1];
+ basetime = timespec64_to_ktime(time);
+ cycle_time = (u64)priv->plat->est->ctr[1] * NSEC_PER_SEC +
+ priv->plat->est->ctr[0];
+ time = stmmac_calc_tas_basetime(basetime,
+ current_time_ns,
+ cycle_time);
+
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+ priv->plat->est->enable = true;
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ if (ret)
+ netdev_err(priv->dev, "failed to configure EST\n");
+ }
+
+ return 0;
+}
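+
+/* Illustration of the split above: a delta of -1500000000 ns yields
+ * neg_adj = 1, sec = 1 and nsec = 500000000, which stmmac_adjust_systime()
+ * then applies as a subtraction from the hardware system time.
+ */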
+
+/**
+ * stmmac_get_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: pointer to hold time/result
+ *
+ * Description: this function will read the current time from the
+ * hardware clock and store it in @ts.
+ */
+static int stmmac_get_time(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+ u64 ns = 0;
+
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &ns);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+/**
+ * stmmac_set_time
+ *
+ * @ptp: pointer to ptp_clock_info structure
+ * @ts: time value to set
+ *
+ * Description: this function will set the current time on the
+ * hardware clock.
+ */
+static int stmmac_set_time(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ unsigned long flags;
+
+ write_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_init_systime(priv, priv->ptpaddr, ts->tv_sec, ts->tv_nsec);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ return 0;
+}
+
+static int stmmac_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+ void __iomem *ptpaddr = priv->ptpaddr;
+ struct stmmac_pps_cfg *cfg;
+ int ret = -EOPNOTSUPP;
+ unsigned long flags;
+ u32 acr_value;
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_PEROUT:
+ /* Reject requests with unsupported flags */
+ if (rq->perout.flags)
+ return -EOPNOTSUPP;
+
+ cfg = &priv->pps[rq->perout.index];
+
+ cfg->start.tv_sec = rq->perout.start.sec;
+ cfg->start.tv_nsec = rq->perout.start.nsec;
+ cfg->period.tv_sec = rq->perout.period.sec;
+ cfg->period.tv_nsec = rq->perout.period.nsec;
+
+ write_lock_irqsave(&priv->ptp_lock, flags);
+ ret = stmmac_flex_pps_config(priv, priv->ioaddr,
+ rq->perout.index, cfg, on,
+ priv->sub_second_inc,
+ priv->systime_flags);
+ write_unlock_irqrestore(&priv->ptp_lock, flags);
+ break;
+ case PTP_CLK_REQ_EXTTS:
+ priv->plat->ext_snapshot_en = on;
+ mutex_lock(&priv->aux_ts_lock);
+ acr_value = readl(ptpaddr + PTP_ACR);
+ acr_value &= ~PTP_ACR_MASK;
+ if (on) {
+ /* Enable External snapshot trigger */
+ acr_value |= priv->plat->ext_snapshot_num;
+ acr_value |= PTP_ACR_ATSFC;
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %d enabled.\n",
+ priv->plat->ext_snapshot_num >>
+ PTP_ACR_ATSEN_SHIFT);
+ } else {
+ netdev_dbg(priv->dev, "Auxiliary Snapshot %d disabled.\n",
+ priv->plat->ext_snapshot_num >>
+ PTP_ACR_ATSEN_SHIFT);
+ }
+ writel(acr_value, ptpaddr + PTP_ACR);
+ mutex_unlock(&priv->aux_ts_lock);
+ /* wait for auxts fifo clear to finish */
+ ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value,
+ !(acr_value & PTP_ACR_ATSFC),
+ 10, 10000);
+ break;
+
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * stmmac_get_syncdevicetime
+ * @device: current device time
+ * @system: system counter value read synchronously with device time
+ * @ctx: context provided by timekeeping code
+ * Description: Read device and system clock simultaneously and return the
+ * corrected clock values in ns.
+ **/
+static int stmmac_get_syncdevicetime(ktime_t *device,
+ struct system_counterval_t *system,
+ void *ctx)
+{
+ struct stmmac_priv *priv = (struct stmmac_priv *)ctx;
+
+ if (priv->plat->crosststamp)
+ return priv->plat->crosststamp(device, system, ctx);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int stmmac_getcrosststamp(struct ptp_clock_info *ptp,
+ struct system_device_crosststamp *xtstamp)
+{
+ struct stmmac_priv *priv =
+ container_of(ptp, struct stmmac_priv, ptp_clock_ops);
+
+ return get_device_system_crosststamp(stmmac_get_syncdevicetime,
+ priv, NULL, xtstamp);
+}
+
+/* structure describing a PTP hardware clock */
+static struct ptp_clock_info stmmac_ptp_clock_ops = {
+ .owner = THIS_MODULE,
+ .name = "stmmac ptp",
+ .max_adj = 62500000,
+ .n_alarm = 0,
+ .n_ext_ts = 0, /* will be overwritten in stmmac_ptp_register */
+ .n_per_out = 0, /* will be overwritten in stmmac_ptp_register */
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = stmmac_adjust_freq,
+ .adjtime = stmmac_adjust_time,
+ .gettime64 = stmmac_get_time,
+ .settime64 = stmmac_set_time,
+ .enable = stmmac_enable,
+ .getcrosststamp = stmmac_getcrosststamp,
+};
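+
+/* max_adj is expressed in parts per billion: 62500000 ppb lets the clock be
+ * sped up or slowed down by up to 6.25%. Platforms may override this value
+ * through plat->ptp_max_adj (see stmmac_ptp_register() below).
+ */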
+
+/**
+ * stmmac_ptp_register
+ * @priv: driver private structure
+ * Description: this function will register the ptp clock driver
+ * to the kernel. It also does some housekeeping work.
+ */
+void stmmac_ptp_register(struct stmmac_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->dma_cap.pps_out_num; i++) {
+ if (i >= STMMAC_PPS_MAX)
+ break;
+ priv->pps[i].available = true;
+ }
+
+ if (priv->plat->ptp_max_adj)
+ stmmac_ptp_clock_ops.max_adj = priv->plat->ptp_max_adj;
+
+ /* Calculate the clock domain crossing (CDC) error if necessary */
+ priv->plat->cdc_error_adj = 0;
+ if (priv->plat->has_gmac4 && priv->plat->clk_ptp_rate)
+ priv->plat->cdc_error_adj = (2 * NSEC_PER_SEC) / priv->plat->clk_ptp_rate;
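+
+ /* For example, with a 62.5 MHz PTP reference clock the line above
+ * evaluates to (2 * 10^9) / 62500000 = 32 ns, i.e. two reference clock
+ * periods of clock domain crossing error.
+ */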
+
+ stmmac_ptp_clock_ops.n_per_out = priv->dma_cap.pps_out_num;
+ stmmac_ptp_clock_ops.n_ext_ts = priv->dma_cap.aux_snapshot_n;
+
+ rwlock_init(&priv->ptp_lock);
+ mutex_init(&priv->aux_ts_lock);
+ priv->ptp_clock_ops = stmmac_ptp_clock_ops;
+
+ priv->ptp_clock = ptp_clock_register(&priv->ptp_clock_ops,
+ priv->device);
+ if (IS_ERR(priv->ptp_clock)) {
+ netdev_err(priv->dev, "ptp_clock_register failed\n");
+ priv->ptp_clock = NULL;
+ } else if (priv->ptp_clock) {
+ netdev_info(priv->dev, "registered PTP clock\n");
+ }
+}
+
+/**
+ * stmmac_ptp_unregister
+ * @priv: driver private structure
+ * Description: this function will remove/unregister the ptp clock driver
+ * from the kernel.
+ */
+void stmmac_ptp_unregister(struct stmmac_priv *priv)
+{
+ if (priv->ptp_clock) {
+ ptp_clock_unregister(priv->ptp_clock);
+ priv->ptp_clock = NULL;
+ pr_debug("Removed PTP HW clock successfully on %s\n",
+ priv->dev->name);
+ }
+
+ mutex_destroy(&priv->aux_ts_lock);
+}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
new file mode 100644
index 000000000..bf619295d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/******************************************************************************
+ PTP Header file
+
+ Copyright (C) 2013 Vayavya Labs Pvt Ltd
+
+
+ Author: Rayagond Kokatanur <rayagond@vayavyalabs.com>
+******************************************************************************/
+
+#ifndef __STMMAC_PTP_H__
+#define __STMMAC_PTP_H__
+
+#define PTP_XGMAC_OFFSET 0xd00
+#define PTP_GMAC4_OFFSET 0xb00
+#define PTP_GMAC3_X_OFFSET 0x700
+
+/* IEEE 1588 PTP register offsets */
+#define PTP_TCR 0x00 /* Timestamp Control Reg */
+#define PTP_SSIR 0x04 /* Sub-Second Increment Reg */
+#define PTP_STSR 0x08 /* System Time - Seconds Reg */
+#define PTP_STNSR 0x0c /* System Time - Nanoseconds Reg */
+#define PTP_STSUR 0x10 /* System Time - Seconds Update Reg */
+#define PTP_STNSUR 0x14 /* System Time - Nanoseconds Update Reg */
+#define PTP_TAR 0x18 /* Timestamp Addend Reg */
+#define PTP_ACR 0x40 /* Auxiliary Control Reg */
+#define PTP_ATNR 0x48 /* Auxiliary Timestamp - Nanoseconds Reg */
+#define PTP_ATSR 0x4c /* Auxiliary Timestamp - Seconds Reg */
+
+#define PTP_STNSUR_ADDSUB_SHIFT 31
+#define PTP_DIGITAL_ROLLOVER_MODE 0x3B9ACA00 /* 10^9 ns */
+#define PTP_BINARY_ROLLOVER_MODE 0x80000000 /* ~0.466 ns */
+
+/* PTP Timestamp control register defines */
+#define PTP_TCR_TSENA BIT(0) /* Timestamp Enable */
+#define PTP_TCR_TSCFUPDT BIT(1) /* Timestamp Fine/Coarse Update */
+#define PTP_TCR_TSINIT BIT(2) /* Timestamp Initialize */
+#define PTP_TCR_TSUPDT BIT(3) /* Timestamp Update */
+#define PTP_TCR_TSTRIG BIT(4) /* Timestamp Interrupt Trigger Enable */
+#define PTP_TCR_TSADDREG BIT(5) /* Addend Reg Update */
+#define PTP_TCR_TSENALL BIT(8) /* Enable Timestamp for All Frames */
+#define PTP_TCR_TSCTRLSSR BIT(9) /* Digital or Binary Rollover Control */
+/* Enable PTP packet Processing for Version 2 Format */
+#define PTP_TCR_TSVER2ENA BIT(10)
+/* Enable Processing of PTP over Ethernet Frames */
+#define PTP_TCR_TSIPENA BIT(11)
+/* Enable Processing of PTP Frames Sent over IPv6-UDP */
+#define PTP_TCR_TSIPV6ENA BIT(12)
+/* Enable Processing of PTP Frames Sent over IPv4-UDP */
+#define PTP_TCR_TSIPV4ENA BIT(13)
+/* Enable Timestamp Snapshot for Event Messages */
+#define PTP_TCR_TSEVNTENA BIT(14)
+/* Enable Snapshot for Messages Relevant to Master */
+#define PTP_TCR_TSMSTRENA BIT(15)
+/* Select PTP packets for taking snapshots.
+ * On GMAC4 specifically:
+ * enable snapshots for SYNC, Pdelay_Req and Pdelay_Resp when TSEVNTENA
+ * is enabled, or for SYNC, Follow_Up, Delay_Req, Delay_Resp, Pdelay_Req,
+ * Pdelay_Resp and Pdelay_Resp_Follow_Up when TSEVNTENA is disabled.
+ */
+#define PTP_TCR_SNAPTYPSEL_1 BIT(16)
+/* Enable MAC address for PTP Frame Filtering */
+#define PTP_TCR_TSENMACADDR BIT(18)
+
+/* SSIR defines */
+#define PTP_SSIR_SSINC_MAX 0xff
+#define GMAC4_PTP_SSIR_SSINC_SHIFT 16
+
+/* Auxiliary Control defines */
+#define PTP_ACR_ATSFC BIT(0) /* Auxiliary Snapshot FIFO Clear */
+#define PTP_ACR_ATSEN0 BIT(4) /* Auxiliary Snapshot 0 Enable */
+#define PTP_ACR_ATSEN1 BIT(5) /* Auxiliary Snapshot 1 Enable */
+#define PTP_ACR_ATSEN2 BIT(6) /* Auxiliary Snapshot 2 Enable */
+#define PTP_ACR_ATSEN3 BIT(7) /* Auxiliary Snapshot 3 Enable */
+#define PTP_ACR_ATSEN_SHIFT 5 /* Auxiliary Snapshot shift */
+#define PTP_ACR_MASK GENMASK(7, 4) /* Aux Snapshot Mask */
+#define PMC_ART_VALUE0 0x01 /* PMC_ART[15:0] timer value */
+#define PMC_ART_VALUE1 0x02 /* PMC_ART[31:16] timer value */
+#define PMC_ART_VALUE2 0x03 /* PMC_ART[47:32] timer value */
+#define PMC_ART_VALUE3 0x04 /* PMC_ART[63:48] timer value */
+#define GMAC4_ART_TIME_SHIFT 16 /* ART TIME 16-bits shift */
+
+enum aux_snapshot {
+ AUX_SNAPSHOT0 = 0x10,
+ AUX_SNAPSHOT1 = 0x20,
+ AUX_SNAPSHOT2 = 0x40,
+ AUX_SNAPSHOT3 = 0x80,
+};
+
+#endif /* __STMMAC_PTP_H__ */
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
new file mode 100644
index 000000000..687f43cd4
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c
@@ -0,0 +1,2049 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Synopsys, Inc. and/or its affiliates.
+ * stmmac Selftests Support
+ *
+ * Author: Jose Abreu <joabreu@synopsys.com>
+ */
+
+#include <linux/bitrev.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/ethtool.h>
+#include <linux/ip.h>
+#include <linux/phy.h>
+#include <linux/udp.h>
+#include <net/pkt_cls.h>
+#include <net/pkt_sched.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/tc_act/tc_gact.h>
+#include "stmmac.h"
+
+struct stmmachdr {
+ __be32 version;
+ __be64 magic;
+ u8 id;
+} __packed;
+
+#define STMMAC_TEST_PKT_SIZE (sizeof(struct ethhdr) + sizeof(struct iphdr) + \
+ sizeof(struct stmmachdr))
+#define STMMAC_TEST_PKT_MAGIC 0xdeadcafecafedeadULL
+#define STMMAC_LB_TIMEOUT msecs_to_jiffies(200)
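+
+/* On-wire layout of a generated test frame (sketch; the VLAN tags and the
+ * trailing padding are optional, depending on stmmac_packet_attrs):
+ *
+ * [ethhdr][vlan tag(s)][iphdr][udphdr or tcphdr][stmmachdr][payload]
+ *
+ * The receive-side validators below match on the magic and id carried in
+ * stmmachdr to tell looped-back test frames apart from unrelated traffic.
+ */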
+
+struct stmmac_packet_attrs {
+ int vlan;
+ int vlan_id_in;
+ int vlan_id_out;
+ unsigned char *src;
+ const unsigned char *dst;
+ u32 ip_src;
+ u32 ip_dst;
+ int tcp;
+ int sport;
+ int dport;
+ u32 exp_hash;
+ int dont_wait;
+ int timeout;
+ int size;
+ int max_size;
+ int remove_sa;
+ u8 id;
+ int sarc;
+ u16 queue_mapping;
+ u64 timestamp;
+};
+
+static u8 stmmac_test_next_id;
+
+static struct sk_buff *stmmac_test_get_udp_skb(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ struct sk_buff *skb = NULL;
+ struct udphdr *uhdr = NULL;
+ struct tcphdr *thdr = NULL;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct iphdr *ihdr;
+ int iplen, size;
+
+ size = attr->size + STMMAC_TEST_PKT_SIZE;
+ if (attr->vlan) {
+ size += 4;
+ if (attr->vlan > 1)
+ size += 4;
+ }
+
+ if (attr->tcp)
+ size += sizeof(struct tcphdr);
+ else
+ size += sizeof(struct udphdr);
+
+ if (attr->max_size && (attr->max_size > size))
+ size = attr->max_size;
+
+ skb = netdev_alloc_skb(priv->dev, size);
+ if (!skb)
+ return NULL;
+
+ prefetchw(skb->data);
+
+ if (attr->vlan > 1)
+ ehdr = skb_push(skb, ETH_HLEN + 8);
+ else if (attr->vlan)
+ ehdr = skb_push(skb, ETH_HLEN + 4);
+ else if (attr->remove_sa)
+ ehdr = skb_push(skb, ETH_HLEN - 6);
+ else
+ ehdr = skb_push(skb, ETH_HLEN);
+ skb_reset_mac_header(skb);
+
+ skb_set_network_header(skb, skb->len);
+ ihdr = skb_put(skb, sizeof(*ihdr));
+
+ skb_set_transport_header(skb, skb->len);
+ if (attr->tcp)
+ thdr = skb_put(skb, sizeof(*thdr));
+ else
+ uhdr = skb_put(skb, sizeof(*uhdr));
+
+ if (!attr->remove_sa)
+ eth_zero_addr(ehdr->h_source);
+ eth_zero_addr(ehdr->h_dest);
+ if (attr->src && !attr->remove_sa)
+ ether_addr_copy(ehdr->h_source, attr->src);
+ if (attr->dst)
+ ether_addr_copy(ehdr->h_dest, attr->dst);
+
+ if (!attr->remove_sa) {
+ ehdr->h_proto = htons(ETH_P_IP);
+ } else {
+ __be16 *ptr = (__be16 *)ehdr;
+
+ /* HACK */
+ ptr[3] = htons(ETH_P_IP);
+ }
+
+ if (attr->vlan) {
+ __be16 *tag, *proto;
+
+ if (!attr->remove_sa) {
+ tag = (void *)ehdr + ETH_HLEN;
+ proto = (void *)ehdr + (2 * ETH_ALEN);
+ } else {
+ tag = (void *)ehdr + ETH_HLEN - 6;
+ proto = (void *)ehdr + ETH_ALEN;
+ }
+
+ proto[0] = htons(ETH_P_8021Q);
+ tag[0] = htons(attr->vlan_id_out);
+ tag[1] = htons(ETH_P_IP);
+ if (attr->vlan > 1) {
+ proto[0] = htons(ETH_P_8021AD);
+ tag[1] = htons(ETH_P_8021Q);
+ tag[2] = htons(attr->vlan_id_in);
+ tag[3] = htons(ETH_P_IP);
+ }
+ }
+
+ if (attr->tcp) {
+ thdr->source = htons(attr->sport);
+ thdr->dest = htons(attr->dport);
+ thdr->doff = sizeof(struct tcphdr) / 4;
+ thdr->check = 0;
+ } else {
+ uhdr->source = htons(attr->sport);
+ uhdr->dest = htons(attr->dport);
+ uhdr->len = htons(sizeof(*shdr) + sizeof(*uhdr) + attr->size);
+ if (attr->max_size)
+ uhdr->len = htons(attr->max_size -
+ (sizeof(*ihdr) + sizeof(*ehdr)));
+ uhdr->check = 0;
+ }
+
+ ihdr->ihl = 5;
+ ihdr->ttl = 32;
+ ihdr->version = 4;
+ if (attr->tcp)
+ ihdr->protocol = IPPROTO_TCP;
+ else
+ ihdr->protocol = IPPROTO_UDP;
+ iplen = sizeof(*ihdr) + sizeof(*shdr) + attr->size;
+ if (attr->tcp)
+ iplen += sizeof(*thdr);
+ else
+ iplen += sizeof(*uhdr);
+
+ if (attr->max_size)
+ iplen = attr->max_size - sizeof(*ehdr);
+
+ ihdr->tot_len = htons(iplen);
+ ihdr->frag_off = 0;
+ ihdr->saddr = htonl(attr->ip_src);
+ ihdr->daddr = htonl(attr->ip_dst);
+ ihdr->tos = 0;
+ ihdr->id = 0;
+ ip_send_check(ihdr);
+
+ shdr = skb_put(skb, sizeof(*shdr));
+ shdr->version = 0;
+ shdr->magic = cpu_to_be64(STMMAC_TEST_PKT_MAGIC);
+ attr->id = stmmac_test_next_id;
+ shdr->id = stmmac_test_next_id++;
+
+ if (attr->size)
+ skb_put(skb, attr->size);
+ if (attr->max_size && (attr->max_size > skb->len))
+ skb_put(skb, attr->max_size - skb->len);
+
+ skb->csum = 0;
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ if (attr->tcp) {
+ thdr->check = ~tcp_v4_check(skb->len, ihdr->saddr, ihdr->daddr, 0);
+ skb->csum_start = skb_transport_header(skb) - skb->head;
+ skb->csum_offset = offsetof(struct tcphdr, check);
+ } else {
+ udp4_hwcsum(skb, ihdr->saddr, ihdr->daddr);
+ }
+
+ skb->protocol = htons(ETH_P_IP);
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = priv->dev;
+
+ if (attr->timestamp)
+ skb->tstamp = ns_to_ktime(attr->timestamp);
+
+ return skb;
+}
+
+static struct sk_buff *stmmac_test_get_arp_skb(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ __be32 ip_src = htonl(attr->ip_src);
+ __be32 ip_dst = htonl(attr->ip_dst);
+ struct sk_buff *skb = NULL;
+
+ skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip_dst, priv->dev, ip_src,
+ NULL, attr->src, attr->dst);
+ if (!skb)
+ return NULL;
+
+ skb->pkt_type = PACKET_HOST;
+ skb->dev = priv->dev;
+
+ return skb;
+}
+
+struct stmmac_test_priv {
+ struct stmmac_packet_attrs *packet;
+ struct packet_type pt;
+ struct completion comp;
+ int double_vlan;
+ int vlan_id;
+ int ok;
+};
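+
+/* Common flow of the loopback tests below: fill a stmmac_packet_attrs,
+ * register a packet_type handler that validates the looped-back frame,
+ * transmit the frame with dev_direct_xmit() and wait on the completion.
+ * tpriv->ok records whether the validator accepted the frame.
+ */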
+
+static int stmmac_test_loopback_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ const unsigned char *dst = tpriv->packet->dst;
+ unsigned char *src = tpriv->packet->src;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct tcphdr *thdr;
+ struct iphdr *ihdr;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (dst) {
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, dst))
+ goto out;
+ }
+ if (tpriv->packet->sarc) {
+ if (!ether_addr_equal_unaligned(ehdr->h_source, ehdr->h_dest))
+ goto out;
+ } else if (src) {
+ if (!ether_addr_equal_unaligned(ehdr->h_source, src))
+ goto out;
+ }
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+
+ if (tpriv->packet->tcp) {
+ if (ihdr->protocol != IPPROTO_TCP)
+ goto out;
+
+ thdr = (struct tcphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (thdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)thdr + sizeof(*thdr));
+ } else {
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ }
+
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+ if (tpriv->packet->exp_hash && !skb->hash)
+ goto out;
+ if (tpriv->packet->id != shdr->id)
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __stmmac_test_loopback(struct stmmac_priv *priv,
+ struct stmmac_packet_attrs *attr)
+{
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_loopback_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = attr;
+
+ if (!attr->dont_wait)
+ dev_add_pack(&tpriv->pt);
+
+ skb = stmmac_test_get_udp_skb(priv, attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = dev_direct_xmit(skb, attr->queue_mapping);
+ if (ret)
+ goto cleanup;
+
+ if (attr->dont_wait)
+ goto cleanup;
+
+ if (!attr->timeout)
+ attr->timeout = STMMAC_LB_TIMEOUT;
+
+ wait_for_completion_timeout(&tpriv->comp, attr->timeout);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+cleanup:
+ if (!attr->dont_wait)
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_mac_loopback(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ attr.dst = priv->dev->dev_addr;
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_phy_loopback(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dev->phydev)
+ return -EOPNOTSUPP;
+
+ ret = phy_loopback(priv->dev->phydev, true);
+ if (ret)
+ return ret;
+
+ attr.dst = priv->dev->dev_addr;
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ phy_loopback(priv->dev->phydev, false);
+ return ret;
+}
+
+static int stmmac_test_mmc(struct stmmac_priv *priv)
+{
+ struct stmmac_counters initial, final;
+ int ret;
+
+ memset(&initial, 0, sizeof(initial));
+ memset(&final, 0, sizeof(final));
+
+ if (!priv->dma_cap.rmon)
+ return -EOPNOTSUPP;
+
+ /* Save previous results into internal struct */
+ stmmac_mmc_read(priv, priv->mmcaddr, &priv->mmc);
+
+ ret = stmmac_test_mac_loopback(priv);
+ if (ret)
+ return ret;
+
+ /* These will be loopback results so no need to save them */
+ stmmac_mmc_read(priv, priv->mmcaddr, &final);
+
+ /*
+ * The number of MMC counters available depends on the HW configuration,
+ * so we just use this one to validate the feature. Hopefully there is
+ * not a version without this counter.
+ */
+ if (final.mmc_tx_framecount_g <= initial.mmc_tx_framecount_g)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stmmac_test_eee(struct stmmac_priv *priv)
+{
+ struct stmmac_extra_stats *initial, *final;
+ int retries = 10;
+ int ret;
+
+ if (!priv->dma_cap.eee || !priv->eee_active)
+ return -EOPNOTSUPP;
+
+ initial = kzalloc(sizeof(*initial), GFP_KERNEL);
+ if (!initial)
+ return -ENOMEM;
+
+ final = kzalloc(sizeof(*final), GFP_KERNEL);
+ if (!final) {
+ ret = -ENOMEM;
+ goto out_free_initial;
+ }
+
+ memcpy(initial, &priv->xstats, sizeof(*initial));
+
+ ret = stmmac_test_mac_loopback(priv);
+ if (ret)
+ goto out_free_final;
+
+ /* There is no traffic on the line, so sooner or later the TX path enters LPI */
+ while (--retries) {
+ memcpy(final, &priv->xstats, sizeof(*final));
+
+ if (final->irq_tx_path_in_lpi_mode_n >
+ initial->irq_tx_path_in_lpi_mode_n)
+ break;
+ msleep(100);
+ }
+
+ if (!retries) {
+ ret = -ETIMEDOUT;
+ goto out_free_final;
+ }
+
+ if (final->irq_tx_path_in_lpi_mode_n <=
+ initial->irq_tx_path_in_lpi_mode_n) {
+ ret = -EINVAL;
+ goto out_free_final;
+ }
+
+ if (final->irq_tx_path_exit_lpi_mode_n <=
+ initial->irq_tx_path_exit_lpi_mode_n) {
+ ret = -EINVAL;
+ goto out_free_final;
+ }
+
+out_free_final:
+ kfree(final);
+out_free_initial:
+ kfree(initial);
+ return ret;
+}
+
+static int stmmac_filter_check(struct stmmac_priv *priv)
+{
+ if (!(priv->dev->flags & IFF_PROMISC))
+ return 0;
+
+ netdev_warn(priv->dev, "Test can't be run in promiscuous mode!\n");
+ return -EOPNOTSUPP;
+}
+
+static bool stmmac_hash_check(struct stmmac_priv *priv, unsigned char *addr)
+{
+ int mc_offset = 32 - priv->hw->mcast_bits_log2;
+ struct netdev_hw_addr *ha;
+ u32 hash, hash_nr;
+
+ /* First compute the hash for desired addr */
+ hash = bitrev32(~crc32_le(~0, addr, 6)) >> mc_offset;
+ hash_nr = hash >> 5;
+ hash = 1 << (hash & 0x1f);
+
+ /* Now, check if it collides with any existing one */
+ netdev_for_each_mc_addr(ha, priv->dev) {
+ u32 nr = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN)) >> mc_offset;
+ if (((nr >> 5) == hash_nr) && ((1 << (nr & 0x1f)) == hash))
+ return false;
+ }
+
+ /* No collisions, address is good to go */
+ return true;
+}
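+
+/* Worked example for the hash filter check above: with 64 multicast bins,
+ * mcast_bits_log2 = 6 and mc_offset = 26, so the top six bits of the
+ * bit-reversed CRC-32 select the bin; hash_nr then picks the hash register
+ * and the low five bits select the bit within that register.
+ */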
+
+static bool stmmac_perfect_check(struct stmmac_priv *priv, unsigned char *addr)
+{
+ struct netdev_hw_addr *ha;
+
+ /* Check if it collides with any existing one */
+ netdev_for_each_uc_addr(ha, priv->dev) {
+ if (!memcmp(ha->addr, addr, ETH_ALEN))
+ return false;
+ }
+
+ /* No collisions, address is good to go */
+ return true;
+}
+
+static int stmmac_test_hfilt(struct stmmac_priv *priv)
+{
+ unsigned char gd_addr[ETH_ALEN] = {0xf1, 0xee, 0xdd, 0xcc, 0xbb, 0xaa};
+ unsigned char bd_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct stmmac_packet_attrs attr = { };
+ int ret, tries = 256;
+
+ ret = stmmac_filter_check(priv);
+ if (ret)
+ return ret;
+
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
+
+ while (--tries) {
+ /* We only need to check the bd_addr for collisions */
+ bd_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_hash_check(priv, bd_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
+
+ ret = dev_mc_add(priv->dev, gd_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = gd_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = bd_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+cleanup:
+ dev_mc_del(priv->dev, gd_addr);
+ return ret;
+}
+
+static int stmmac_test_pfilt(struct stmmac_priv *priv)
+{
+ unsigned char gd_addr[ETH_ALEN] = {0xf0, 0x01, 0x44, 0x55, 0x66, 0x77};
+ unsigned char bd_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct stmmac_packet_attrs attr = { };
+ int ret, tries = 256;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+ return -EOPNOTSUPP;
+
+ while (--tries) {
+ /* We only need to check the bd_addr for collisions */
+ bd_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_perfect_check(priv, bd_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
+
+ ret = dev_uc_add(priv->dev, gd_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = gd_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = bd_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+cleanup:
+ dev_uc_del(priv->dev, gd_addr);
+ return ret;
+}
+
+static int stmmac_test_mcfilt(struct stmmac_priv *priv)
+{
+ unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct stmmac_packet_attrs attr = { };
+ int ret, tries = 256;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+ return -EOPNOTSUPP;
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
+
+ while (--tries) {
+ /* We only need to check the mc_addr for collisions */
+ mc_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_hash_check(priv, mc_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
+
+ ret = dev_uc_add(priv->dev, uc_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = uc_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = mc_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+cleanup:
+ dev_uc_del(priv->dev, uc_addr);
+ return ret;
+}
+
+static int stmmac_test_ucfilt(struct stmmac_priv *priv)
+{
+ unsigned char uc_addr[ETH_ALEN] = {0xf0, 0xff, 0xff, 0xff, 0xff, 0xff};
+ unsigned char mc_addr[ETH_ALEN] = {0xf1, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct stmmac_packet_attrs attr = { };
+ int ret, tries = 256;
+
+ if (stmmac_filter_check(priv))
+ return -EOPNOTSUPP;
+ if (netdev_uc_count(priv->dev) >= priv->hw->unicast_filter_entries)
+ return -EOPNOTSUPP;
+ if (netdev_mc_count(priv->dev) >= priv->hw->multicast_filter_bins)
+ return -EOPNOTSUPP;
+
+ while (--tries) {
+ /* We only need to check the uc_addr for collisions */
+ uc_addr[ETH_ALEN - 1] = tries;
+ if (stmmac_perfect_check(priv, uc_addr))
+ break;
+ }
+
+ if (!tries)
+ return -EOPNOTSUPP;
+
+ ret = dev_mc_add(priv->dev, mc_addr);
+ if (ret)
+ return ret;
+
+ attr.dst = mc_addr;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = uc_addr;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+cleanup:
+ dev_mc_del(priv->dev, mc_addr);
+ return ret;
+}
+
+static int stmmac_test_flowctrl_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct ethhdr *ehdr;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal_unaligned(ehdr->h_source, orig_ndev->dev_addr))
+ goto out;
+ if (ehdr->h_proto != htons(ETH_P_PAUSE))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_flowctrl(struct stmmac_priv *priv)
+{
+ unsigned char paddr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00, 0x00, 0x01};
+ struct phy_device *phydev = priv->dev->phydev;
+ u32 rx_cnt = priv->plat->rx_queues_to_use;
+ struct stmmac_test_priv *tpriv;
+ unsigned int pkt_count;
+ int i, ret = 0;
+
+ if (!phydev || (!phydev->pause && !phydev->asym_pause))
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+ tpriv->pt.type = htons(ETH_P_PAUSE);
+ tpriv->pt.func = stmmac_test_flowctrl_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ dev_add_pack(&tpriv->pt);
+
+ /* Compute the minimum number of packets needed to fill the RX FIFO */
+ pkt_count = priv->plat->rx_fifo_size;
+ if (!pkt_count)
+ pkt_count = priv->dma_cap.rx_fifo_size;
+ pkt_count /= 1400;
+ pkt_count *= 2;
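+
+ /* For example, a 16 KiB RX FIFO gives 16384 / 1400 = 11 frames,
+ * doubled to 22 so that the stopped RX queues are certain to overflow
+ * and force the MAC to emit a PAUSE frame.
+ */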
+
+ for (i = 0; i < rx_cnt; i++)
+ stmmac_stop_rx(priv, priv->ioaddr, i);
+
+ ret = dev_set_promiscuity(priv->dev, 1);
+ if (ret)
+ goto cleanup;
+
+ ret = dev_mc_add(priv->dev, paddr);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < pkt_count; i++) {
+ struct stmmac_packet_attrs attr = { };
+
+ attr.dst = priv->dev->dev_addr;
+ attr.dont_wait = true;
+ attr.size = 1400;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup;
+ if (tpriv->ok)
+ break;
+ }
+
+ /* Wait for some time in case RX Watchdog is enabled */
+ msleep(200);
+
+ for (i = 0; i < rx_cnt; i++) {
+ struct stmmac_channel *ch = &priv->channel[i];
+ u32 tail;
+
+ tail = priv->dma_conf.rx_queue[i].dma_rx_phy +
+ (priv->dma_conf.dma_rx_size * sizeof(struct dma_desc));
+
+ stmmac_set_rx_tail_ptr(priv, priv->ioaddr, tail, i);
+ stmmac_start_rx(priv, priv->ioaddr, i);
+
+ local_bh_disable();
+ napi_reschedule(&ch->rx_napi);
+ local_bh_enable();
+ }
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+cleanup:
+ dev_mc_del(priv->dev, paddr);
+ dev_set_promiscuity(priv->dev, -1);
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_rss(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+
+ if (!priv->dma_cap.rssen || !priv->rss.enable)
+ return -EOPNOTSUPP;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.exp_hash = true;
+ attr.sport = 0x321;
+ attr.dport = 0x123;
+
+ return __stmmac_test_loopback(priv, &attr);
+}
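+
+/* In the RSS test above, exp_hash makes the validator require a non-zero
+ * skb->hash, i.e. proof that the hardware computed and delivered an RSS hash
+ * for the looped-back flow; the distinct source and destination ports give
+ * the hash input a non-trivial 4-tuple.
+ */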
+
+static int stmmac_test_vlan_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct stmmachdr *shdr;
+ struct ethhdr *ehdr;
+ struct udphdr *uhdr;
+ struct iphdr *ihdr;
+ u16 proto;
+
+ proto = tpriv->double_vlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ skb = skb_unshare(skb, GFP_ATOMIC);
+ if (!skb)
+ goto out;
+
+ if (skb_linearize(skb))
+ goto out;
+ if (skb_headlen(skb) < (STMMAC_TEST_PKT_SIZE - ETH_HLEN))
+ goto out;
+ if (tpriv->vlan_id) {
+ if (skb->vlan_proto != htons(proto))
+ goto out;
+ if (skb->vlan_tci != tpriv->vlan_id) {
+ /* Means filter did not work. */
+ tpriv->ok = false;
+ complete(&tpriv->comp);
+ goto out;
+ }
+ }
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->dst))
+ goto out;
+
+ ihdr = ip_hdr(skb);
+ if (tpriv->double_vlan)
+ ihdr = (struct iphdr *)(skb_network_header(skb) + 4);
+ if (ihdr->protocol != IPPROTO_UDP)
+ goto out;
+
+ uhdr = (struct udphdr *)((u8 *)ihdr + 4 * ihdr->ihl);
+ if (uhdr->dest != htons(tpriv->packet->dport))
+ goto out;
+
+ shdr = (struct stmmachdr *)((u8 *)uhdr + sizeof(*uhdr));
+ if (shdr->magic != cpu_to_be64(STMMAC_TEST_PKT_MAGIC))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int __stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 1;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ ret = dev_direct_xmit(skb, 0);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -EINVAL;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021Q), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_vlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_vlanfilt(priv);
+}
+
+static int stmmac_test_vlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
+ return -EOPNOTSUPP;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_vlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
+static int __stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0, i;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = true;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_8021Q);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+
+ /*
+ * As we use HASH filtering, false positives may appear. This is a
+ * specially chosen ID so that adjacent IDs (+4) have different
+ * HASH values.
+ */
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ for (i = 0; i < 4; i++) {
+ attr.vlan = 2;
+ attr.vlan_id_out = tpriv->vlan_id + i;
+ attr.dst = priv->dev->dev_addr;
+ attr.sport = 9;
+ attr.dport = 9;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ ret = dev_direct_xmit(skb, 0);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+ if (ret && !i) {
+ goto vlan_del;
+ } else if (!ret && i) {
+ ret = -EINVAL;
+ goto vlan_del;
+ } else {
+ ret = 0;
+ }
+
+ tpriv->ok = false;
+ }
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(ETH_P_8021AD), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_dvlanfilt(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.vlhash)
+ return -EOPNOTSUPP;
+
+ return __stmmac_test_dvlanfilt(priv);
+}
+
+static int stmmac_test_dvlanfilt_perfect(struct stmmac_priv *priv)
+{
+ int ret, prev_cap = priv->dma_cap.vlhash;
+
+ if (!(priv->dev->features & NETIF_F_HW_VLAN_STAG_FILTER))
+ return -EOPNOTSUPP;
+
+ priv->dma_cap.vlhash = 0;
+ ret = __stmmac_test_dvlanfilt(priv);
+ priv->dma_cap.vlhash = prev_cap;
+
+ return ret;
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ unsigned char addr[ETH_ALEN] = {0xde, 0xad, 0xbe, 0xef, 0x00, 0x00};
+ struct tc_cls_u32_offload cls_u32 = { };
+ struct stmmac_packet_attrs attr = { };
+ struct tc_action **actions;
+ struct tc_u32_sel *sel;
+ struct tcf_gact *gact;
+ struct tcf_exts *exts;
+ int ret, i, nk = 1;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.frpsel)
+ return -EOPNOTSUPP;
+
+ sel = kzalloc(struct_size(sel, keys, nk), GFP_KERNEL);
+ if (!sel)
+ return -ENOMEM;
+
+ exts = kzalloc(sizeof(*exts), GFP_KERNEL);
+ if (!exts) {
+ ret = -ENOMEM;
+ goto cleanup_sel;
+ }
+
+ actions = kcalloc(nk, sizeof(*actions), GFP_KERNEL);
+ if (!actions) {
+ ret = -ENOMEM;
+ goto cleanup_exts;
+ }
+
+ gact = kcalloc(nk, sizeof(*gact), GFP_KERNEL);
+ if (!gact) {
+ ret = -ENOMEM;
+ goto cleanup_actions;
+ }
+
+ cls_u32.command = TC_CLSU32_NEW_KNODE;
+ cls_u32.common.chain_index = 0;
+ cls_u32.common.protocol = htons(ETH_P_ALL);
+ cls_u32.knode.exts = exts;
+ cls_u32.knode.sel = sel;
+ cls_u32.knode.handle = 0x123;
+
+ exts->nr_actions = nk;
+ exts->actions = actions;
+ for (i = 0; i < nk; i++) {
+ actions[i] = (struct tc_action *)&gact[i];
+ gact->tcf_action = TC_ACT_SHOT;
+ }
+
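+ /* Single U32 key: match four bytes at frame offset 6, i.e. the first
+ * four bytes of the source MAC address (0xdeadbeef here), so frames
+ * from addr above are dropped by the TC_ACT_SHOT action.
+ */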
+ sel->nkeys = nk;
+ sel->offshift = 0;
+ sel->keys[0].off = 6;
+ sel->keys[0].val = htonl(0xdeadbeef);
+ sel->keys[0].mask = ~0x0;
+
+ ret = stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+ if (ret)
+ goto cleanup_act;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.src = addr;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL; /* Shall NOT receive packet */
+
+ cls_u32.command = TC_CLSU32_DELETE_KNODE;
+ stmmac_tc_setup_cls_u32(priv, priv, &cls_u32);
+
+cleanup_act:
+ kfree(gact);
+cleanup_actions:
+ kfree(actions);
+cleanup_exts:
+ kfree(exts);
+cleanup_sel:
+ kfree(sel);
+ return ret;
+}
+#else
+static int stmmac_test_rxp(struct stmmac_priv *priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_desc_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x1;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_desc_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ priv->sarc_type = 0x2;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ priv->sarc_type = 0x0;
+ return ret;
+}
+
+static int stmmac_test_reg_sai(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ attr.remove_sa = true;
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x2))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_reg_sar(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ attr.sarc = true;
+ attr.src = src;
+ attr.dst = priv->dev->dev_addr;
+
+ if (stmmac_sarc_configure(priv, priv->ioaddr, 0x3))
+ return -EOPNOTSUPP;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+
+ stmmac_sarc_configure(priv, priv->ioaddr, 0x0);
+ return ret;
+}
+
+static int stmmac_test_vlanoff_common(struct stmmac_priv *priv, bool svlan)
+{
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ int ret = 0;
+ u16 proto;
+
+ if (!priv->dma_cap.vlins)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ proto = svlan ? ETH_P_8021AD : ETH_P_8021Q;
+
+ tpriv->ok = false;
+ tpriv->double_vlan = svlan;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = svlan ? htons(ETH_P_8021Q) : htons(ETH_P_IP);
+ tpriv->pt.func = stmmac_test_vlan_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+ tpriv->vlan_id = 0x123;
+ dev_add_pack(&tpriv->pt);
+
+ ret = vlan_vid_add(priv->dev, htons(proto), tpriv->vlan_id);
+ if (ret)
+ goto cleanup;
+
+ attr.dst = priv->dev->dev_addr;
+
+ skb = stmmac_test_get_udp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto vlan_del;
+ }
+
+ __vlan_hwaccel_put_tag(skb, htons(proto), tpriv->vlan_id);
+ skb->protocol = htons(proto);
+
+ ret = dev_direct_xmit(skb, 0);
+ if (ret)
+ goto vlan_del;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+vlan_del:
+ vlan_vid_del(priv->dev, htons(proto), tpriv->vlan_id);
+cleanup:
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int stmmac_test_vlanoff(struct stmmac_priv *priv)
+{
+ return stmmac_test_vlanoff_common(priv, false);
+}
+
+static int stmmac_test_svlanoff(struct stmmac_priv *priv)
+{
+ if (!priv->dma_cap.dvlan)
+ return -EOPNOTSUPP;
+ return stmmac_test_vlanoff_common(priv, true);
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask)
+{
+ struct flow_dissector_key_ipv4_addrs key, mask;
+ unsigned long dummy_cookie = 0xdeadbeef;
+ struct stmmac_packet_attrs attr = { };
+ struct flow_dissector *dissector;
+ struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
+ struct flow_rule *rule;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.l3l4fnum)
+ return -EOPNOTSUPP;
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
+ stmmac_rss_configure(priv, priv->hw, NULL,
+ priv->plat->rx_queues_to_use);
+ }
+
+ dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
+ if (!dissector) {
+ ret = -ENOMEM;
+ goto cleanup_rss;
+ }
+
+ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_IPV4_ADDRS);
+ dissector->offset[FLOW_DISSECTOR_KEY_IPV4_ADDRS] = 0;
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls) {
+ ret = -ENOMEM;
+ goto cleanup_dissector;
+ }
+
+ cls->common.chain_index = 0;
+ cls->command = FLOW_CLS_REPLACE;
+ cls->cookie = dummy_cookie;
+
+ rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
+ if (!rule) {
+ ret = -ENOMEM;
+ goto cleanup_cls;
+ }
+
+ rule->match.dissector = dissector;
+ rule->match.key = (void *)&key;
+ rule->match.mask = (void *)&mask;
+
+ key.src = htonl(src);
+ key.dst = htonl(dst);
+ mask.src = src_mask;
+ mask.dst = dst_mask;
+
+ cls->rule = rule;
+
+ rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
+ rule->action.num_entries = 1;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.ip_dst = dst;
+ attr.ip_src = src;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup_rule;
+
+ ret = stmmac_tc_setup_cls(priv, priv, cls);
+ if (ret)
+ goto cleanup_rule;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+ cls->command = FLOW_CLS_DESTROY;
+ stmmac_tc_setup_cls(priv, priv, cls);
+cleanup_rule:
+ kfree(rule);
+cleanup_cls:
+ kfree(cls);
+cleanup_dissector:
+ kfree(dissector);
+cleanup_rss:
+ if (old_enable) {
+ priv->rss.enable = old_enable;
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+ }
+
+ return ret;
+}
+#else
+static int __stmmac_test_l3filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_l3filt_da(struct stmmac_priv *priv)
+{
+ u32 addr = 0x10203040;
+
+ return __stmmac_test_l3filt(priv, addr, 0, ~0, 0);
+}
+
+static int stmmac_test_l3filt_sa(struct stmmac_priv *priv)
+{
+ u32 addr = 0x10203040;
+
+ return __stmmac_test_l3filt(priv, 0, addr, 0, ~0);
+}
+
+#ifdef CONFIG_NET_CLS_ACT
+static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask, bool udp)
+{
+ struct {
+ struct flow_dissector_key_basic bkey;
+ struct flow_dissector_key_ports key;
+ } __aligned(BITS_PER_LONG / 8) keys;
+ struct {
+ struct flow_dissector_key_basic bmask;
+ struct flow_dissector_key_ports mask;
+ } __aligned(BITS_PER_LONG / 8) masks;
+ unsigned long dummy_cookie = 0xdeadbeef;
+ struct stmmac_packet_attrs attr = { };
+ struct flow_dissector *dissector;
+ struct flow_cls_offload *cls;
+ int ret, old_enable = 0;
+ struct flow_rule *rule;
+
+ if (!tc_can_offload(priv->dev))
+ return -EOPNOTSUPP;
+ if (!priv->dma_cap.l3l4fnum)
+ return -EOPNOTSUPP;
+ if (priv->rss.enable) {
+ old_enable = priv->rss.enable;
+ priv->rss.enable = false;
+ stmmac_rss_configure(priv, priv->hw, NULL,
+ priv->plat->rx_queues_to_use);
+ }
+
+ dissector = kzalloc(sizeof(*dissector), GFP_KERNEL);
+ if (!dissector) {
+ ret = -ENOMEM;
+ goto cleanup_rss;
+ }
+
+ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_BASIC);
+ dissector->used_keys |= (1 << FLOW_DISSECTOR_KEY_PORTS);
+ dissector->offset[FLOW_DISSECTOR_KEY_BASIC] = 0;
+ dissector->offset[FLOW_DISSECTOR_KEY_PORTS] = offsetof(typeof(keys), key);
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls) {
+ ret = -ENOMEM;
+ goto cleanup_dissector;
+ }
+
+ cls->common.chain_index = 0;
+ cls->command = FLOW_CLS_REPLACE;
+ cls->cookie = dummy_cookie;
+
+ rule = kzalloc(struct_size(rule, action.entries, 1), GFP_KERNEL);
+ if (!rule) {
+ ret = -ENOMEM;
+ goto cleanup_cls;
+ }
+
+ rule->match.dissector = dissector;
+ rule->match.key = (void *)&keys;
+ rule->match.mask = (void *)&masks;
+
+ keys.bkey.ip_proto = udp ? IPPROTO_UDP : IPPROTO_TCP;
+ keys.key.src = htons(src);
+ keys.key.dst = htons(dst);
+ masks.mask.src = src_mask;
+ masks.mask.dst = dst_mask;
+
+ cls->rule = rule;
+
+ rule->action.entries[0].id = FLOW_ACTION_DROP;
+ rule->action.entries[0].hw_stats = FLOW_ACTION_HW_STATS_ANY;
+ rule->action.num_entries = 1;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.tcp = !udp;
+ attr.sport = src;
+ attr.dport = dst;
+ attr.ip_dst = 0;
+
+ /* Shall receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto cleanup_rule;
+
+ ret = stmmac_tc_setup_cls(priv, priv, cls);
+ if (ret)
+ goto cleanup_rule;
+
+ /* Shall NOT receive packet */
+ ret = __stmmac_test_loopback(priv, &attr);
+ ret = ret ? 0 : -EINVAL;
+
+ cls->command = FLOW_CLS_DESTROY;
+ stmmac_tc_setup_cls(priv, priv, cls);
+cleanup_rule:
+ kfree(rule);
+cleanup_cls:
+ kfree(cls);
+cleanup_dissector:
+ kfree(dissector);
+cleanup_rss:
+ if (old_enable) {
+ priv->rss.enable = old_enable;
+ stmmac_rss_configure(priv, priv->hw, &priv->rss,
+ priv->plat->rx_queues_to_use);
+ }
+
+ return ret;
+}
+#else
+static int __stmmac_test_l4filt(struct stmmac_priv *priv, u32 dst, u32 src,
+ u32 dst_mask, u32 src_mask, bool udp)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+static int stmmac_test_l4filt_da_tcp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, false);
+}
+
+static int stmmac_test_l4filt_sa_tcp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, false);
+}
+
+static int stmmac_test_l4filt_da_udp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, dummy_port, 0, ~0, 0, true);
+}
+
+static int stmmac_test_l4filt_sa_udp(struct stmmac_priv *priv)
+{
+ u16 dummy_port = 0x123;
+
+ return __stmmac_test_l4filt(priv, 0, dummy_port, 0, ~0, true);
+}
+
+static int stmmac_test_arp_validate(struct sk_buff *skb,
+ struct net_device *ndev,
+ struct packet_type *pt,
+ struct net_device *orig_ndev)
+{
+ struct stmmac_test_priv *tpriv = pt->af_packet_priv;
+ struct ethhdr *ehdr;
+ struct arphdr *ahdr;
+
+ ehdr = (struct ethhdr *)skb_mac_header(skb);
+ if (!ether_addr_equal_unaligned(ehdr->h_dest, tpriv->packet->src))
+ goto out;
+
+ ahdr = arp_hdr(skb);
+ if (ahdr->ar_op != htons(ARPOP_REPLY))
+ goto out;
+
+ tpriv->ok = true;
+ complete(&tpriv->comp);
+out:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int stmmac_test_arpoffload(struct stmmac_priv *priv)
+{
+ unsigned char src[ETH_ALEN] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06};
+ unsigned char dst[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ struct stmmac_packet_attrs attr = { };
+ struct stmmac_test_priv *tpriv;
+ struct sk_buff *skb = NULL;
+ u32 ip_addr = 0xdeadcafe;
+ u32 ip_src = 0xdeadbeef;
+ int ret;
+
+ if (!priv->dma_cap.arpoffsel)
+ return -EOPNOTSUPP;
+
+ tpriv = kzalloc(sizeof(*tpriv), GFP_KERNEL);
+ if (!tpriv)
+ return -ENOMEM;
+
+ tpriv->ok = false;
+ init_completion(&tpriv->comp);
+
+ tpriv->pt.type = htons(ETH_P_ARP);
+ tpriv->pt.func = stmmac_test_arp_validate;
+ tpriv->pt.dev = priv->dev;
+ tpriv->pt.af_packet_priv = tpriv;
+ tpriv->packet = &attr;
+ dev_add_pack(&tpriv->pt);
+
+ attr.src = src;
+ attr.ip_src = ip_src;
+ attr.dst = dst;
+ attr.ip_dst = ip_addr;
+
+ skb = stmmac_test_get_arp_skb(priv, &attr);
+ if (!skb) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+
+ ret = stmmac_set_arp_offload(priv, priv->hw, true, ip_addr);
+ if (ret) {
+ kfree_skb(skb);
+ goto cleanup;
+ }
+
+ ret = dev_set_promiscuity(priv->dev, 1);
+ if (ret) {
+ kfree_skb(skb);
+ goto cleanup;
+ }
+
+ ret = dev_direct_xmit(skb, 0);
+ if (ret)
+ goto cleanup_promisc;
+
+ wait_for_completion_timeout(&tpriv->comp, STMMAC_LB_TIMEOUT);
+ ret = tpriv->ok ? 0 : -ETIMEDOUT;
+
+cleanup_promisc:
+ dev_set_promiscuity(priv->dev, -1);
+cleanup:
+ stmmac_set_arp_offload(priv, priv->hw, false, 0x0);
+ dev_remove_pack(&tpriv->pt);
+ kfree(tpriv);
+ return ret;
+}
+
+static int __stmmac_test_jumbo(struct stmmac_priv *priv, u16 queue)
+{
+ struct stmmac_packet_attrs attr = { };
+ int size = priv->dma_conf.dma_buf_sz;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.max_size = size - ETH_FCS_LEN;
+ attr.queue_mapping = queue;
+
+ return __stmmac_test_loopback(priv, &attr);
+}
+
+static int stmmac_test_jumbo(struct stmmac_priv *priv)
+{
+ return __stmmac_test_jumbo(priv, 0);
+}
+
+static int stmmac_test_mjumbo(struct stmmac_priv *priv)
+{
+ u32 chan, tx_cnt = priv->plat->tx_queues_to_use;
+ int ret;
+
+ if (tx_cnt <= 1)
+ return -EOPNOTSUPP;
+
+ for (chan = 0; chan < tx_cnt; chan++) {
+ ret = __stmmac_test_jumbo(priv, chan);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int stmmac_test_sph(struct stmmac_priv *priv)
+{
+ unsigned long cnt_end, cnt_start = priv->xstats.rx_split_hdr_pkt_n;
+ struct stmmac_packet_attrs attr = { };
+ int ret;
+
+ if (!priv->sph)
+ return -EOPNOTSUPP;
+
+ /* Check for UDP first */
+ attr.dst = priv->dev->dev_addr;
+ attr.tcp = false;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = priv->xstats.rx_split_hdr_pkt_n;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ /* Check for TCP now */
+ cnt_start = cnt_end;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.tcp = true;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ return ret;
+
+ cnt_end = priv->xstats.rx_split_hdr_pkt_n;
+ if (cnt_end <= cnt_start)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int stmmac_test_tbs(struct stmmac_priv *priv)
+{
+#define STMMAC_TBS_LT_OFFSET (500 * 1000 * 1000) /* 500 ms */
+ struct stmmac_packet_attrs attr = { };
+ struct tc_etf_qopt_offload qopt;
+ u64 start_time, curr_time = 0;
+ unsigned long flags;
+ int ret, i;
+
+ if (!priv->hwts_tx_en)
+ return -EOPNOTSUPP;
+
+ /* Find first TBS enabled Queue, if any */
+ for (i = 0; i < priv->plat->tx_queues_to_use; i++)
+ if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_AVAIL)
+ break;
+
+ if (i >= priv->plat->tx_queues_to_use)
+ return -EOPNOTSUPP;
+
+ qopt.enable = true;
+ qopt.queue = i;
+
+ ret = stmmac_tc_setup_etf(priv, priv, &qopt);
+ if (ret)
+ return ret;
+
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if (!curr_time) {
+ ret = -EOPNOTSUPP;
+ goto fail_disable;
+ }
+
+ start_time = curr_time;
+ curr_time += STMMAC_TBS_LT_OFFSET;
+
+ attr.dst = priv->dev->dev_addr;
+ attr.timestamp = curr_time;
+ attr.timeout = nsecs_to_jiffies(2 * STMMAC_TBS_LT_OFFSET);
+ attr.queue_mapping = i;
+
+ ret = __stmmac_test_loopback(priv, &attr);
+ if (ret)
+ goto fail_disable;
+
+ /* Check if expected time has elapsed */
+ read_lock_irqsave(&priv->ptp_lock, flags);
+ stmmac_get_systime(priv, priv->ptpaddr, &curr_time);
+ read_unlock_irqrestore(&priv->ptp_lock, flags);
+
+ if ((curr_time - start_time) < STMMAC_TBS_LT_OFFSET)
+ ret = -EINVAL;
+
+fail_disable:
+ qopt.enable = false;
+ stmmac_tc_setup_etf(priv, priv, &qopt);
+ return ret;
+}
+
+#define STMMAC_LOOPBACK_NONE 0
+#define STMMAC_LOOPBACK_MAC 1
+#define STMMAC_LOOPBACK_PHY 2
+
+static const struct stmmac_test {
+ char name[ETH_GSTRING_LEN];
+ int lb;
+ int (*fn)(struct stmmac_priv *priv);
+} stmmac_selftests[] = {
+ {
+ .name = "MAC Loopback ",
+ .lb = STMMAC_LOOPBACK_MAC,
+ .fn = stmmac_test_mac_loopback,
+ }, {
+ .name = "PHY Loopback ",
+ .lb = STMMAC_LOOPBACK_NONE, /* Test will handle it */
+ .fn = stmmac_test_phy_loopback,
+ }, {
+ .name = "MMC Counters ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mmc,
+ }, {
+ .name = "EEE ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_eee,
+ }, {
+ .name = "Hash Filter MC ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_hfilt,
+ }, {
+ .name = "Perfect Filter UC ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_pfilt,
+ }, {
+ .name = "MC Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mcfilt,
+ }, {
+ .name = "UC Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_ucfilt,
+ }, {
+ .name = "Flow Control ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_flowctrl,
+ }, {
+ .name = "RSS ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rss,
+ }, {
+ .name = "VLAN Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt,
+ }, {
+ .name = "VLAN Filtering (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanfilt_perfect,
+ }, {
+ .name = "Double VLAN Filter ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt,
+ }, {
+ .name = "Double VLAN Filter (perf) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_dvlanfilt_perfect,
+ }, {
+ .name = "Flexible RX Parser ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_rxp,
+ }, {
+ .name = "SA Insertion (desc) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sai,
+ }, {
+ .name = "SA Replacement (desc) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_desc_sar,
+ }, {
+ .name = "SA Insertion (reg) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sai,
+ }, {
+ .name = "SA Replacement (reg) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_reg_sar,
+ }, {
+ .name = "VLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_vlanoff,
+ }, {
+ .name = "SVLAN TX Insertion ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_svlanoff,
+ }, {
+ .name = "L3 DA Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l3filt_da,
+ }, {
+ .name = "L3 SA Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l3filt_sa,
+ }, {
+ .name = "L4 DA TCP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_da_tcp,
+ }, {
+ .name = "L4 SA TCP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_sa_tcp,
+ }, {
+ .name = "L4 DA UDP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_da_udp,
+ }, {
+ .name = "L4 SA UDP Filtering ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_l4filt_sa_udp,
+ }, {
+ .name = "ARP Offload ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_arpoffload,
+ }, {
+ .name = "Jumbo Frame ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_jumbo,
+ }, {
+ .name = "Multichannel Jumbo ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_mjumbo,
+ }, {
+ .name = "Split Header ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_sph,
+ }, {
+ .name = "TBS (ETF Scheduler) ",
+ .lb = STMMAC_LOOPBACK_PHY,
+ .fn = stmmac_test_tbs,
+ },
+};
+
+void stmmac_selftest_run(struct net_device *dev,
+ struct ethtool_test *etest, u64 *buf)
+{
+ struct stmmac_priv *priv = netdev_priv(dev);
+ int count = stmmac_selftest_get_count(priv);
+ int i, ret;
+
+ memset(buf, 0, sizeof(*buf) * count);
+ stmmac_test_next_id = 0;
+
+ if (etest->flags != ETH_TEST_FL_OFFLINE) {
+ netdev_err(priv->dev, "Only offline tests are supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ } else if (!netif_carrier_ok(dev)) {
+ netdev_err(priv->dev, "You need valid Link to execute tests\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ return;
+ }
+
+ /* Wait for the queues to drain */
+ msleep(200);
+
+ for (i = 0; i < count; i++) {
+ ret = 0;
+
+ switch (stmmac_selftests[i].lb) {
+ case STMMAC_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, true);
+ if (!ret)
+ break;
+ fallthrough;
+ case STMMAC_LOOPBACK_MAC:
+ ret = stmmac_set_mac_loopback(priv, priv->ioaddr, true);
+ break;
+ case STMMAC_LOOPBACK_NONE:
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ /*
+ * The first tests will always be MAC / PHY loopback. If any of
+ * them is not supported we abort early.
+ */
+ if (ret) {
+ netdev_err(priv->dev, "Loopback is not supported\n");
+ etest->flags |= ETH_TEST_FL_FAILED;
+ break;
+ }
+
+ ret = stmmac_selftests[i].fn(priv);
+ if (ret && (ret != -EOPNOTSUPP))
+ etest->flags |= ETH_TEST_FL_FAILED;
+ buf[i] = ret;
+
+ switch (stmmac_selftests[i].lb) {
+ case STMMAC_LOOPBACK_PHY:
+ ret = -EOPNOTSUPP;
+ if (dev->phydev)
+ ret = phy_loopback(dev->phydev, false);
+ if (!ret)
+ break;
+ fallthrough;
+ case STMMAC_LOOPBACK_MAC:
+ stmmac_set_mac_loopback(priv, priv->ioaddr, false);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void stmmac_selftest_get_strings(struct stmmac_priv *priv, u8 *data)
+{
+ u8 *p = data;
+ int i;
+
+ for (i = 0; i < stmmac_selftest_get_count(priv); i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%2d. %s", i + 1,
+ stmmac_selftests[i].name);
+ p += ETH_GSTRING_LEN;
+ }
+}
+
+int stmmac_selftest_get_count(struct stmmac_priv *priv)
+{
+ return ARRAY_SIZE(stmmac_selftests);
+}
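+
+/* The selftests are driven through ethtool. A typical invocation, assuming
+ * a hypothetical interface eth0 with an active link, would be:
+ *
+ *   # ethtool -t eth0 offline
+ *
+ * Each entry of stmmac_selftests[] is reported as one numbered result:
+ * 0 means pass, -95 (-EOPNOTSUPP) means the feature is absent, and any
+ * other negative value marks the run as failed.
+ */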
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
new file mode 100644
index 000000000..390c90083
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c
@@ -0,0 +1,1115 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/*
+ * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
+ * stmmac TC Handling (HW only)
+ */
+
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_gact.h>
+#include "common.h"
+#include "dwmac4.h"
+#include "dwmac5.h"
+#include "stmmac.h"
+
+static void tc_fill_all_pass_entry(struct stmmac_tc_entry *entry)
+{
+ memset(entry, 0, sizeof(*entry));
+ entry->in_use = true;
+ entry->is_last = true;
+ entry->is_frag = false;
+ entry->prio = ~0x0;
+ entry->handle = 0;
+ entry->val.match_data = 0x0;
+ entry->val.match_en = 0x0;
+ entry->val.af = 1;
+ entry->val.dma_ch_no = 0x0;
+}
+
+static struct stmmac_tc_entry *tc_find_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls,
+ bool free)
+{
+ struct stmmac_tc_entry *entry, *first = NULL, *dup = NULL;
+ u32 loc = cls->knode.handle;
+ int i;
+
+ for (i = 0; i < priv->tc_entries_max; i++) {
+ entry = &priv->tc_entries[i];
+ if (!entry->in_use && !first && free)
+ first = entry;
+ if ((entry->handle == loc) && !free && !entry->is_frag)
+ dup = entry;
+ }
+
+ if (dup)
+ return dup;
+ if (first) {
+ first->handle = loc;
+ first->in_use = true;
+
+ /* Reset HW values */
+ memset(&first->val, 0, sizeof(first->val));
+ }
+
+ return first;
+}
+
+static int tc_fill_actions(struct stmmac_tc_entry *entry,
+ struct stmmac_tc_entry *frag,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *action_entry = entry;
+ const struct tc_action *act;
+ struct tcf_exts *exts;
+ int i;
+
+ exts = cls->knode.exts;
+ if (!tcf_exts_has_actions(exts))
+ return -EINVAL;
+ if (frag)
+ action_entry = frag;
+
+ tcf_exts_for_each_action(i, act, exts) {
+ /* Accept */
+ if (is_tcf_gact_ok(act)) {
+ action_entry->val.af = 1;
+ break;
+ }
+ /* Drop */
+ if (is_tcf_gact_shot(act)) {
+ action_entry->val.rf = 1;
+ break;
+ }
+
+ /* Unsupported */
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int tc_fill_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *entry, *frag = NULL;
+ struct tc_u32_sel *sel = cls->knode.sel;
+ u32 off, data, mask, real_off, rem;
+ u32 prio = cls->common.prio << 16;
+ int ret;
+
+ /* Only 1 match per entry */
+ if (sel->nkeys <= 0 || sel->nkeys > 1)
+ return -EINVAL;
+
+ off = sel->keys[0].off << sel->offshift;
+ data = sel->keys[0].val;
+ mask = sel->keys[0].mask;
+
+ switch (ntohs(cls->common.protocol)) {
+ case ETH_P_ALL:
+ break;
+ case ETH_P_IP:
+ off += ETH_HLEN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (off > priv->tc_off_max)
+ return -EINVAL;
+
+ real_off = off / 4;
+ rem = off % 4;
+
+ entry = tc_find_entry(priv, cls, true);
+ if (!entry)
+ return -EINVAL;
+
+ if (rem) {
+ frag = tc_find_entry(priv, cls, true);
+ if (!frag) {
+ ret = -EINVAL;
+ goto err_unuse;
+ }
+
+ entry->frag_ptr = frag;
+ entry->val.match_en = (mask << (rem * 8)) &
+ GENMASK(31, rem * 8);
+ entry->val.match_data = (data << (rem * 8)) &
+ GENMASK(31, rem * 8);
+ entry->val.frame_offset = real_off;
+ entry->prio = prio;
+
+ frag->val.match_en = (mask >> (rem * 8)) &
+ GENMASK(rem * 8 - 1, 0);
+ frag->val.match_data = (data >> (rem * 8)) &
+ GENMASK(rem * 8 - 1, 0);
+ frag->val.frame_offset = real_off + 1;
+ frag->prio = prio;
+ frag->is_frag = true;
+ } else {
+ entry->frag_ptr = NULL;
+ entry->val.match_en = mask;
+ entry->val.match_data = data;
+ entry->val.frame_offset = real_off;
+ entry->prio = prio;
+ }
+
+ ret = tc_fill_actions(entry, frag, cls);
+ if (ret)
+ goto err_unuse;
+
+ return 0;
+
+err_unuse:
+ if (frag)
+ frag->in_use = false;
+ entry->in_use = false;
+ return ret;
+}
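+
+/* Worked example of the fragment split above, with illustrative values:
+ * a 4-byte match at off = 6 gives real_off = 1 and rem = 2. With
+ * mask = 0xffffffff, the first entry matches
+ * (mask << 16) & GENMASK(31, 16) = 0xffff0000 in frame word 1, and the
+ * fragment entry matches (mask >> 16) & GENMASK(15, 0) = 0x0000ffff in
+ * frame word 2.
+ */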
+
+static void tc_unfill_entry(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ struct stmmac_tc_entry *entry;
+
+ entry = tc_find_entry(priv, cls, false);
+ if (!entry)
+ return;
+
+ entry->in_use = false;
+ if (entry->frag_ptr) {
+ entry = entry->frag_ptr;
+ entry->is_frag = false;
+ entry->in_use = false;
+ }
+}
+
+static int tc_config_knode(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ int ret;
+
+ ret = tc_fill_entry(priv, cls);
+ if (ret)
+ return ret;
+
+ ret = stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
+ if (ret)
+ goto err_unfill;
+
+ return 0;
+
+err_unfill:
+ tc_unfill_entry(priv, cls);
+ return ret;
+}
+
+static int tc_delete_knode(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ /* Set entry and fragments as not used */
+ tc_unfill_entry(priv, cls);
+
+ return stmmac_rxp_config(priv, priv->hw->pcsr, priv->tc_entries,
+ priv->tc_entries_max);
+}
+
+static int tc_setup_cls_u32(struct stmmac_priv *priv,
+ struct tc_cls_u32_offload *cls)
+{
+ switch (cls->command) {
+ case TC_CLSU32_REPLACE_KNODE:
+ tc_unfill_entry(priv, cls);
+ fallthrough;
+ case TC_CLSU32_NEW_KNODE:
+ return tc_config_knode(priv, cls);
+ case TC_CLSU32_DELETE_KNODE:
+ return tc_delete_knode(priv, cls);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
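+
+/* One way to exercise this path from user space, with a hypothetical
+ * interface name and an offset that must stay within tc_off_max, is a
+ * cls_u32 ingress filter such as:
+ *
+ *   # tc qdisc add dev eth0 ingress
+ *   # tc filter add dev eth0 parent ffff: protocol ip u32 \
+ *       match u32 0x00110000 0x00ff0000 at 8 action drop
+ *
+ * which drops IPv4 frames whose protocol byte (offset 9 of the IP header,
+ * inside the 32-bit word at offset 8) equals 0x11 (UDP).
+ */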
+
+static int tc_rfs_init(struct stmmac_priv *priv)
+{
+ int i;
+
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN] = 8;
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP] = 1;
+ priv->rfs_entries_max[STMMAC_RFS_T_1588] = 1;
+
+ for (i = 0; i < STMMAC_RFS_T_MAX; i++)
+ priv->rfs_entries_total += priv->rfs_entries_max[i];
+
+ priv->rfs_entries = devm_kcalloc(priv->device,
+ priv->rfs_entries_total,
+ sizeof(*priv->rfs_entries),
+ GFP_KERNEL);
+ if (!priv->rfs_entries)
+ return -ENOMEM;
+
+ dev_info(priv->device, "Enabled RFS Flow TC (entries=%d)\n",
+ priv->rfs_entries_total);
+
+ return 0;
+}
+
+static int tc_init(struct stmmac_priv *priv)
+{
+ struct dma_features *dma_cap = &priv->dma_cap;
+ unsigned int count;
+ int ret, i;
+
+ if (dma_cap->l3l4fnum) {
+ priv->flow_entries_max = dma_cap->l3l4fnum;
+ priv->flow_entries = devm_kcalloc(priv->device,
+ dma_cap->l3l4fnum,
+ sizeof(*priv->flow_entries),
+ GFP_KERNEL);
+ if (!priv->flow_entries)
+ return -ENOMEM;
+
+ for (i = 0; i < priv->flow_entries_max; i++)
+ priv->flow_entries[i].idx = i;
+
+ dev_info(priv->device, "Enabled L3L4 Flow TC (entries=%d)\n",
+ priv->flow_entries_max);
+ }
+
+ ret = tc_rfs_init(priv);
+ if (ret)
+ return ret;
+
+ if (!priv->plat->fpe_cfg) {
+ priv->plat->fpe_cfg = devm_kzalloc(priv->device,
+ sizeof(*priv->plat->fpe_cfg),
+ GFP_KERNEL);
+ if (!priv->plat->fpe_cfg)
+ return -ENOMEM;
+ } else {
+ memset(priv->plat->fpe_cfg, 0, sizeof(*priv->plat->fpe_cfg));
+ }
+
+ /* Fail silently as we can still use remaining features, e.g. CBS */
+ if (!dma_cap->frpsel)
+ return 0;
+
+ switch (dma_cap->frpbs) {
+ case 0x0:
+ priv->tc_off_max = 64;
+ break;
+ case 0x1:
+ priv->tc_off_max = 128;
+ break;
+ case 0x2:
+ priv->tc_off_max = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ switch (dma_cap->frpes) {
+ case 0x0:
+ count = 64;
+ break;
+ case 0x1:
+ count = 128;
+ break;
+ case 0x2:
+ count = 256;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Reserve the last entry as a filter that lets all frames pass */
+ priv->tc_entries_max = count;
+ priv->tc_entries = devm_kcalloc(priv->device,
+ count, sizeof(*priv->tc_entries), GFP_KERNEL);
+ if (!priv->tc_entries)
+ return -ENOMEM;
+
+ tc_fill_all_pass_entry(&priv->tc_entries[count - 1]);
+
+ dev_info(priv->device, "Enabling HW TC (entries=%d, max_off=%d)\n",
+ priv->tc_entries_max, priv->tc_off_max);
+
+ return 0;
+}
+
+static int tc_setup_cbs(struct stmmac_priv *priv,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ u32 tx_queues_count = priv->plat->tx_queues_to_use;
+ u32 queue = qopt->queue;
+ u32 ptr, speed_div;
+ u32 mode_to_use;
+ u64 value;
+ int ret;
+
+ /* Queue 0 is not AVB capable */
+ if (queue <= 0 || queue >= tx_queues_count)
+ return -EINVAL;
+ if (!priv->dma_cap.av)
+ return -EOPNOTSUPP;
+
+ /* Port Transmit Rate and Speed Divider */
+ switch (priv->speed) {
+ case SPEED_10000:
+ ptr = 32;
+ speed_div = 10000000;
+ break;
+ case SPEED_5000:
+ ptr = 32;
+ speed_div = 5000000;
+ break;
+ case SPEED_2500:
+ ptr = 8;
+ speed_div = 2500000;
+ break;
+ case SPEED_1000:
+ ptr = 8;
+ speed_div = 1000000;
+ break;
+ case SPEED_100:
+ ptr = 4;
+ speed_div = 100000;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
+ if (mode_to_use == MTL_QUEUE_DCB && qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_AVB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB;
+ } else if (!qopt->enable) {
+ ret = stmmac_dma_qmode(priv, priv->ioaddr, queue,
+ MTL_QUEUE_DCB);
+ if (ret)
+ return ret;
+
+ priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB;
+ }
+
+ /* Final adjustments for HW */
+ value = div_s64(qopt->idleslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].idle_slope = value & GENMASK(31, 0);
+
+ value = div_s64(-qopt->sendslope * 1024ll * ptr, speed_div);
+ priv->plat->tx_queues_cfg[queue].send_slope = value & GENMASK(31, 0);
+
+ value = qopt->hicredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].high_credit = value & GENMASK(31, 0);
+
+ value = qopt->locredit * 1024ll * 8;
+ priv->plat->tx_queues_cfg[queue].low_credit = value & GENMASK(31, 0);
+
+ ret = stmmac_config_cbs(priv, priv->hw,
+ priv->plat->tx_queues_cfg[queue].send_slope,
+ priv->plat->tx_queues_cfg[queue].idle_slope,
+ priv->plat->tx_queues_cfg[queue].high_credit,
+ priv->plat->tx_queues_cfg[queue].low_credit,
+ queue);
+ if (ret)
+ return ret;
+
+ dev_info(priv->device, "CBS queue %d: send %d, idle %d, hi %d, lo %d\n",
+ queue, qopt->sendslope, qopt->idleslope,
+ qopt->hicredit, qopt->locredit);
+ return 0;
+}
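+
+/* Worked example of the slope conversion above, with illustrative values:
+ * at SPEED_1000 (ptr = 8, speed_div = 1000000) an idleslope of
+ * 20000 kbit/s yields 20000 * 1024 * 8 / 1000000 = 163 (truncated) as the
+ * programmed idle slope. A matching user-space setup, with hypothetical
+ * interface, handle and credit values, could be:
+ *
+ *   # tc qdisc replace dev eth0 parent 100:2 cbs \
+ *       idleslope 20000 sendslope -980000 \
+ *       hicredit 30 locredit -1470 offload 1
+ */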
+
+static int tc_parse_flow_actions(struct stmmac_priv *priv,
+ struct flow_action *action,
+ struct stmmac_flow_entry *entry,
+ struct netlink_ext_ack *extack)
+{
+ struct flow_action_entry *act;
+ int i;
+
+ if (!flow_action_has_entries(action))
+ return -EINVAL;
+
+ if (!flow_action_basic_hw_stats_check(action, extack))
+ return -EOPNOTSUPP;
+
+ flow_action_for_each(i, act, action) {
+ switch (act->id) {
+ case FLOW_ACTION_DROP:
+ entry->action |= STMMAC_FLOW_ACTION_DROP;
+ return 0;
+ default:
+ break;
+ }
+ }
+
+ /* Nothing to do, maybe an inverse filter? */
+ return 0;
+}
+
+#define ETHER_TYPE_FULL_MASK cpu_to_be16(~0)
+
+static int tc_add_basic_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ struct stmmac_flow_entry *entry)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ struct flow_match_basic match;
+
+ /* Nothing to do here */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
+ return -EINVAL;
+
+ flow_rule_match_basic(rule, &match);
+
+ entry->ip_proto = match.key->ip_proto;
+ return 0;
+}
+
+static int tc_add_ip4_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ struct stmmac_flow_entry *entry)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
+ struct flow_match_ipv4_addrs match;
+ u32 hw_match;
+ int ret;
+
+ /* Nothing to do here */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS))
+ return -EINVAL;
+
+ flow_rule_match_ipv4_addrs(rule, &match);
+ hw_match = ntohl(match.key->src) & ntohl(match.mask->src);
+ if (hw_match) {
+ ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
+ false, true, inv, hw_match);
+ if (ret)
+ return ret;
+ }
+
+ hw_match = ntohl(match.key->dst) & ntohl(match.mask->dst);
+ if (hw_match) {
+ ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, true,
+ false, false, inv, hw_match);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int tc_add_ports_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ struct stmmac_flow_entry *entry)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ bool inv = entry->action & STMMAC_FLOW_ACTION_DROP;
+ struct flow_match_ports match;
+ u32 hw_match;
+ bool is_udp;
+ int ret;
+
+ /* Nothing to do here */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS))
+ return -EINVAL;
+
+ switch (entry->ip_proto) {
+ case IPPROTO_TCP:
+ is_udp = false;
+ break;
+ case IPPROTO_UDP:
+ is_udp = true;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flow_rule_match_ports(rule, &match);
+
+ hw_match = ntohs(match.key->src) & ntohs(match.mask->src);
+ if (hw_match) {
+ ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
+ is_udp, true, inv, hw_match);
+ if (ret)
+ return ret;
+ }
+
+ hw_match = ntohs(match.key->dst) & ntohs(match.mask->dst);
+ if (hw_match) {
+ ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, true,
+ is_udp, false, inv, hw_match);
+ if (ret)
+ return ret;
+ }
+
+ entry->is_l4 = true;
+ return 0;
+}
+
+static struct stmmac_flow_entry *tc_find_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ bool get_free)
+{
+ int i;
+
+ for (i = 0; i < priv->flow_entries_max; i++) {
+ struct stmmac_flow_entry *entry = &priv->flow_entries[i];
+
+ if (entry->cookie == cls->cookie)
+ return entry;
+ if (get_free && (entry->in_use == false))
+ return entry;
+ }
+
+ return NULL;
+}
+
+static struct {
+ int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
+ struct stmmac_flow_entry *entry);
+} tc_flow_parsers[] = {
+ { .fn = tc_add_basic_flow },
+ { .fn = tc_add_ip4_flow },
+ { .fn = tc_add_ports_flow },
+};
+
+static int tc_add_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ int i, ret;
+
+ if (!entry) {
+ entry = tc_find_flow(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ ret = tc_parse_flow_actions(priv, &rule->action, entry,
+ cls->common.extack);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < ARRAY_SIZE(tc_flow_parsers); i++) {
+ ret = tc_flow_parsers[i].fn(priv, cls, entry);
+ if (!ret)
+ entry->in_use = true;
+ }
+
+ if (!entry->in_use)
+ return -EINVAL;
+
+ entry->cookie = cls->cookie;
+ return 0;
+}
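+
+/* A flower rule that would be consumed by tc_add_basic_flow(),
+ * tc_add_ip4_flow() and tc_add_ports_flow() above, with hypothetical
+ * interface, address and port:
+ *
+ *   # tc filter add dev eth0 parent ffff: protocol ip flower \
+ *       ip_proto udp dst_ip 192.168.0.10 dst_port 5001 \
+ *       skip_sw action drop
+ */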
+
+static int tc_del_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
+ int ret;
+
+ if (!entry || !entry->in_use)
+ return -ENOENT;
+
+ if (entry->is_l4) {
+ ret = stmmac_config_l4_filter(priv, priv->hw, entry->idx, false,
+ false, false, false, 0);
+ } else {
+ ret = stmmac_config_l3_filter(priv, priv->hw, entry->idx, false,
+ false, false, false, 0);
+ }
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->is_l4 = false;
+ return ret;
+}
+
+static struct stmmac_rfs_entry *tc_find_rfs(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls,
+ bool get_free)
+{
+ int i;
+
+ for (i = 0; i < priv->rfs_entries_total; i++) {
+ struct stmmac_rfs_entry *entry = &priv->rfs_entries[i];
+
+ if (entry->cookie == cls->cookie)
+ return entry;
+ if (get_free && entry->in_use == false)
+ return entry;
+ }
+
+ return NULL;
+}
+
+#define VLAN_PRIO_FULL_MASK (0x07)
+
+static int tc_add_vlan_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct flow_match_vlan match;
+
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_VLAN])
+ return -ENOENT;
+
+ /* Nothing to do here */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN))
+ return -EINVAL;
+
+ if (tc < 0) {
+ netdev_err(priv->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_vlan(rule, &match);
+
+ if (match.mask->vlan_priority) {
+ u32 prio;
+
+ if (match.mask->vlan_priority != VLAN_PRIO_FULL_MASK) {
+ netdev_err(priv->dev, "Only full mask is supported for VLAN priority");
+ return -EINVAL;
+ }
+
+ prio = BIT(match.key->vlan_priority);
+ stmmac_rx_queue_prio(priv, priv->hw, prio, tc);
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->type = STMMAC_RFS_T_VLAN;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]++;
+ }
+
+ return 0;
+}
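+
+/* Example of steering a VLAN priority to a traffic class through the
+ * filter above, with a hypothetical interface; the priority mask must be
+ * the full 3 bits, as checked against VLAN_PRIO_FULL_MASK:
+ *
+ *   # tc filter add dev eth0 parent ffff: protocol 802.1Q flower \
+ *       vlan_prio 3 hw_tc 1
+ */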
+
+static int tc_del_vlan_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+
+ if (!entry || !entry->in_use || entry->type != STMMAC_RFS_T_VLAN)
+ return -ENOENT;
+
+ stmmac_rx_queue_prio(priv, priv->hw, 0, entry->tc);
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->type = 0;
+
+ priv->rfs_entries_cnt[STMMAC_RFS_T_VLAN]--;
+
+ return 0;
+}
+
+static int tc_add_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ int tc = tc_classid_to_hwtc(priv->dev, cls->classid);
+ struct flow_match_basic match;
+
+ if (!entry) {
+ entry = tc_find_rfs(priv, cls, true);
+ if (!entry)
+ return -ENOENT;
+ }
+
+ /* Nothing to do here */
+ if (!dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC))
+ return -EINVAL;
+
+ if (tc < 0) {
+ netdev_err(priv->dev, "Invalid traffic class\n");
+ return -EINVAL;
+ }
+
+ flow_rule_match_basic(rule, &match);
+
+ if (match.mask->n_proto) {
+ u16 etype = ntohs(match.key->n_proto);
+
+ if (match.mask->n_proto != ETHER_TYPE_FULL_MASK) {
+ netdev_err(priv->dev, "Only full mask is supported for EthType filter");
+ return -EINVAL;
+ }
+ switch (etype) {
+ case ETH_P_LLDP:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_LLDP])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_LLDP;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, tc);
+ break;
+ case ETH_P_1588:
+ if (priv->rfs_entries_cnt[STMMAC_RFS_T_1588] >=
+ priv->rfs_entries_max[STMMAC_RFS_T_1588])
+ return -ENOENT;
+
+ entry->type = STMMAC_RFS_T_1588;
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]++;
+
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, tc);
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported", etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = true;
+ entry->cookie = cls->cookie;
+ entry->tc = tc;
+ entry->etype = etype;
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
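+
+/* Example of routing LLDP frames (EthType 0x88cc) to a traffic class via
+ * the filter above, with a hypothetical interface and class:
+ *
+ *   # tc filter add dev eth0 parent ffff: protocol 0x88cc flower \
+ *       skip_sw hw_tc 2
+ */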
+
+static int tc_del_ethtype_flow(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ struct stmmac_rfs_entry *entry = tc_find_rfs(priv, cls, false);
+
+ if (!entry || !entry->in_use ||
+ entry->type < STMMAC_RFS_T_LLDP ||
+ entry->type > STMMAC_RFS_T_1588)
+ return -ENOENT;
+
+ switch (entry->etype) {
+ case ETH_P_LLDP:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_DCBCPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_LLDP]--;
+ break;
+ case ETH_P_1588:
+ stmmac_rx_queue_routing(priv, priv->hw,
+ PACKET_PTPQ, 0);
+ priv->rfs_entries_cnt[STMMAC_RFS_T_1588]--;
+ break;
+ default:
+ netdev_err(priv->dev, "EthType(0x%x) is not supported",
+ entry->etype);
+ return -EINVAL;
+ }
+
+ entry->in_use = false;
+ entry->cookie = 0;
+ entry->tc = 0;
+ entry->etype = 0;
+ entry->type = 0;
+
+ return 0;
+}
+
+static int tc_add_flow_cls(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ int ret;
+
+ ret = tc_add_flow(priv, cls);
+ if (!ret)
+ return ret;
+
+ ret = tc_add_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
+ return tc_add_vlan_flow(priv, cls);
+}
+
+static int tc_del_flow_cls(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ int ret;
+
+ ret = tc_del_flow(priv, cls);
+ if (!ret)
+ return ret;
+
+ ret = tc_del_ethtype_flow(priv, cls);
+ if (!ret)
+ return ret;
+
+ return tc_del_vlan_flow(priv, cls);
+}
+
+static int tc_setup_cls(struct stmmac_priv *priv,
+ struct flow_cls_offload *cls)
+{
+ int ret = 0;
+
+ /* When RSS is enabled, the filtering will be bypassed */
+ if (priv->rss.enable)
+ return -EBUSY;
+
+ switch (cls->command) {
+ case FLOW_CLS_REPLACE:
+ ret = tc_add_flow_cls(priv, cls);
+ break;
+ case FLOW_CLS_DESTROY:
+ ret = tc_del_flow_cls(priv, cls);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+struct timespec64 stmmac_calc_tas_basetime(ktime_t old_base_time,
+ ktime_t current_time,
+ u64 cycle_time)
+{
+ struct timespec64 time;
+
+ if (ktime_after(old_base_time, current_time)) {
+ time = ktime_to_timespec64(old_base_time);
+ } else {
+ s64 n;
+ ktime_t base_time;
+
+ n = div64_s64(ktime_sub_ns(current_time, old_base_time),
+ cycle_time);
+ base_time = ktime_add_ns(old_base_time,
+ (n + 1) * cycle_time);
+
+ time = ktime_to_timespec64(base_time);
+ }
+
+ return time;
+}
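+
+/* Worked example, with illustrative values: for old_base_time = 100 ns,
+ * current_time = 1050 ns and cycle_time = 200 ns, n = (1050 - 100) / 200
+ * = 4, so the returned base time is 100 + (4 + 1) * 200 = 1100 ns: the
+ * first cycle start that does not lie in the past.
+ */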
+
+static int tc_setup_taprio(struct stmmac_priv *priv,
+ struct tc_taprio_qopt_offload *qopt)
+{
+ u32 size, wid = priv->dma_cap.estwid, dep = priv->dma_cap.estdep;
+ struct plat_stmmacenet_data *plat = priv->plat;
+ struct timespec64 time, current_time, qopt_time;
+ ktime_t current_time_ns;
+ bool fpe = false;
+ int i, ret = 0;
+ u64 ctr;
+
+ if (!priv->dma_cap.estsel)
+ return -EOPNOTSUPP;
+
+ switch (wid) {
+ case 0x1:
+ wid = 16;
+ break;
+ case 0x2:
+ wid = 20;
+ break;
+ case 0x3:
+ wid = 24;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ switch (dep) {
+ case 0x1:
+ dep = 64;
+ break;
+ case 0x2:
+ dep = 128;
+ break;
+ case 0x3:
+ dep = 256;
+ break;
+ case 0x4:
+ dep = 512;
+ break;
+ case 0x5:
+ dep = 1024;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (!qopt->enable)
+ goto disable;
+ if (qopt->num_entries >= dep)
+ return -EINVAL;
+ if (!qopt->cycle_time)
+ return -ERANGE;
+
+ if (!plat->est) {
+ plat->est = devm_kzalloc(priv->device, sizeof(*plat->est),
+ GFP_KERNEL);
+ if (!plat->est)
+ return -ENOMEM;
+
+ mutex_init(&priv->plat->est->lock);
+ } else {
+ memset(plat->est, 0, sizeof(*plat->est));
+ }
+
+ size = qopt->num_entries;
+
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->gcl_size = size;
+ priv->plat->est->enable = qopt->enable;
+ mutex_unlock(&priv->plat->est->lock);
+
+ for (i = 0; i < size; i++) {
+ s64 delta_ns = qopt->entries[i].interval;
+ u32 gates = qopt->entries[i].gate_mask;
+
+ if (delta_ns > GENMASK(wid, 0))
+ return -ERANGE;
+ if (gates > GENMASK(31 - wid, 0))
+ return -ERANGE;
+
+ switch (qopt->entries[i].command) {
+ case TC_TAPRIO_CMD_SET_GATES:
+ if (fpe)
+ return -EINVAL;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_HOLD:
+ gates |= BIT(0);
+ fpe = true;
+ break;
+ case TC_TAPRIO_CMD_SET_AND_RELEASE:
+ gates &= ~BIT(0);
+ fpe = true;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ priv->plat->est->gcl[i] = delta_ns | (gates << wid);
+ }
+
+ mutex_lock(&priv->plat->est->lock);
+ /* Adjust for real system time */
+ priv->ptp_clock_ops.gettime64(&priv->ptp_clock_ops, &current_time);
+ current_time_ns = timespec64_to_ktime(current_time);
+ time = stmmac_calc_tas_basetime(qopt->base_time, current_time_ns,
+ qopt->cycle_time);
+
+ priv->plat->est->btr[0] = (u32)time.tv_nsec;
+ priv->plat->est->btr[1] = (u32)time.tv_sec;
+
+ qopt_time = ktime_to_timespec64(qopt->base_time);
+ priv->plat->est->btr_reserve[0] = (u32)qopt_time.tv_nsec;
+ priv->plat->est->btr_reserve[1] = (u32)qopt_time.tv_sec;
+
+ ctr = qopt->cycle_time;
+ priv->plat->est->ctr[0] = do_div(ctr, NSEC_PER_SEC);
+ priv->plat->est->ctr[1] = (u32)ctr;
+
+ if (fpe && !priv->dma_cap.fpesel) {
+ mutex_unlock(&priv->plat->est->lock);
+ return -EOPNOTSUPP;
+ }
+
+ /* Actual FPE register configuration will be done after the FPE
+ * handshake succeeds.
+ */
+ priv->plat->fpe_cfg->enable = fpe;
+
+ ret = stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ if (ret) {
+ netdev_err(priv->dev, "failed to configure EST\n");
+ goto disable;
+ }
+
+ netdev_info(priv->dev, "configured EST\n");
+
+ if (fpe) {
+ stmmac_fpe_handshake(priv, true);
+ netdev_info(priv->dev, "start FPE handshake\n");
+ }
+
+ return 0;
+
+disable:
+ if (priv->plat->est) {
+ mutex_lock(&priv->plat->est->lock);
+ priv->plat->est->enable = false;
+ stmmac_est_configure(priv, priv->ioaddr, priv->plat->est,
+ priv->plat->clk_ptp_rate);
+ mutex_unlock(&priv->plat->est->lock);
+ }
+
+ priv->plat->fpe_cfg->enable = false;
+ stmmac_fpe_configure(priv, priv->ioaddr,
+ priv->plat->fpe_cfg,
+ priv->plat->tx_queues_to_use,
+ priv->plat->rx_queues_to_use,
+ false);
+ netdev_info(priv->dev, "disabled FPE\n");
+
+ stmmac_fpe_handshake(priv, false);
+ netdev_info(priv->dev, "stop FPE handshake\n");
+
+ return ret;
+}
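+
+/* A full-offload taprio schedule exercising this path could be set up as
+ * follows (hypothetical interface, queue map and base time; flags 0x2
+ * requests full offload):
+ *
+ *   # tc qdisc replace dev eth0 parent root handle 100 taprio \
+ *       num_tc 3 map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 \
+ *       queues 1@0 1@1 2@2 \
+ *       base-time 1528743495910289987 \
+ *       sched-entry S 01 300000 \
+ *       sched-entry S 02 300000 \
+ *       sched-entry S 04 400000 \
+ *       flags 0x2
+ */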
+
+static int tc_setup_etf(struct stmmac_priv *priv,
+ struct tc_etf_qopt_offload *qopt)
+{
+ if (!priv->dma_cap.tbssel)
+ return -EOPNOTSUPP;
+ if (qopt->queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+ if (!(priv->dma_conf.tx_queue[qopt->queue].tbs & STMMAC_TBS_AVAIL))
+ return -EINVAL;
+
+ if (qopt->enable)
+ priv->dma_conf.tx_queue[qopt->queue].tbs |= STMMAC_TBS_EN;
+ else
+ priv->dma_conf.tx_queue[qopt->queue].tbs &= ~STMMAC_TBS_EN;
+
+ netdev_info(priv->dev, "%s ETF for Queue %d\n",
+ qopt->enable ? "enabled" : "disabled", qopt->queue);
+ return 0;
+}
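+
+/* ETF offload on a TBS-capable queue can then be enabled with, e.g.
+ * (hypothetical interface; 100:1 must map to a queue that advertises
+ * STMMAC_TBS_AVAIL):
+ *
+ *   # tc qdisc replace dev eth0 parent 100:1 etf \
+ *       clockid CLOCK_TAI delta 500000 offload
+ */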
+
+const struct stmmac_tc_ops dwmac510_tc_ops = {
+ .init = tc_init,
+ .setup_cls_u32 = tc_setup_cls_u32,
+ .setup_cbs = tc_setup_cbs,
+ .setup_cls = tc_setup_cls,
+ .setup_taprio = tc_setup_taprio,
+ .setup_etf = tc_setup_etf,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
new file mode 100644
index 000000000..9d4d8c3da
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2021, Intel Corporation. */
+
+#include <net/xdp_sock_drv.h>
+
+#include "stmmac.h"
+#include "stmmac_xdp.h"
+
+static int stmmac_xdp_enable_pool(struct stmmac_priv *priv,
+ struct xsk_buff_pool *pool, u16 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ bool need_update;
+ u32 frame_size;
+ int err;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ frame_size = xsk_pool_get_rx_frame_size(pool);
+ /* XDP ZC does not span multiple frames; make sure the XSK pool
+ * buffer size can store at least a Q-in-Q frame.
+ */
+ if (frame_size < ETH_FRAME_LEN + VLAN_HLEN * 2)
+ return -EOPNOTSUPP;
+
+ err = xsk_pool_dma_map(pool, priv->device, STMMAC_RX_DMA_ATTR);
+ if (err) {
+ netdev_err(priv->dev, "Failed to map xsk pool\n");
+ return err;
+ }
+
+ need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+ if (need_update) {
+ napi_disable(&ch->rx_napi);
+ napi_disable(&ch->tx_napi);
+ stmmac_disable_rx_queue(priv, queue);
+ stmmac_disable_tx_queue(priv, queue);
+ }
+
+ set_bit(queue, priv->af_xdp_zc_qps);
+
+ if (need_update) {
+ stmmac_enable_rx_queue(priv, queue);
+ stmmac_enable_tx_queue(priv, queue);
+ napi_enable(&ch->rxtx_napi);
+
+ err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue)
+{
+ struct stmmac_channel *ch = &priv->channel[queue];
+ struct xsk_buff_pool *pool;
+ bool need_update;
+
+ if (queue >= priv->plat->rx_queues_to_use ||
+ queue >= priv->plat->tx_queues_to_use)
+ return -EINVAL;
+
+ pool = xsk_get_pool_from_qid(priv->dev, queue);
+ if (!pool)
+ return -EINVAL;
+
+ need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv);
+
+ if (need_update) {
+ napi_disable(&ch->rxtx_napi);
+ stmmac_disable_rx_queue(priv, queue);
+ stmmac_disable_tx_queue(priv, queue);
+ synchronize_rcu();
+ }
+
+ xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR);
+
+ clear_bit(queue, priv->af_xdp_zc_qps);
+
+ if (need_update) {
+ stmmac_enable_rx_queue(priv, queue);
+ stmmac_enable_tx_queue(priv, queue);
+ napi_enable(&ch->rx_napi);
+ napi_enable(&ch->tx_napi);
+ }
+
+ return 0;
+}
+
+int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
+ u16 queue)
+{
+ return pool ? stmmac_xdp_enable_pool(priv, pool, queue) :
+ stmmac_xdp_disable_pool(priv, queue);
+}
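+
+/* The pool callbacks above are reached through XDP_SETUP_XSK_POOL, for
+ * instance when an AF_XDP application such as the kernel's xdpsock sample
+ * binds a socket in zero-copy mode (hypothetical interface and queue):
+ *
+ *   # xdpsock -i eth0 -q 1 -z
+ */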
+
+int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack)
+{
+ struct net_device *dev = priv->dev;
+ struct bpf_prog *old_prog;
+ bool need_update;
+ bool if_running;
+
+ if_running = netif_running(dev);
+
+ if (prog && dev->mtu > ETH_DATA_LEN) {
+ /* For now, the driver doesn't support XDP functionality with
+ * jumbo frames, so we return an error.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
+ return -EOPNOTSUPP;
+ }
+
+ need_update = !!priv->xdp_prog != !!prog;
+ if (if_running && need_update)
+ stmmac_xdp_release(dev);
+
+ old_prog = xchg(&priv->xdp_prog, prog);
+ if (old_prog)
+ bpf_prog_put(old_prog);
+
+ /* Disable RX SPH for XDP operation */
+ priv->sph = priv->sph_cap && !stmmac_xdp_is_enabled(priv);
+
+ if (if_running && need_update)
+ stmmac_xdp_open(dev);
+
+ return 0;
+}
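+
+/* Attaching or detaching an XDP program reaches this function via
+ * ndo_bpf, e.g. (hypothetical interface and object file):
+ *
+ *   # ip link set dev eth0 xdp obj xdp_prog.o sec xdp
+ *
+ * Note that SPH is kept off above for as long as a program is attached.
+ */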
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
new file mode 100644
index 000000000..896dc987d
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2021, Intel Corporation. */
+
+#ifndef _STMMAC_XDP_H_
+#define _STMMAC_XDP_H_
+
+#define STMMAC_MAX_RX_BUF_SIZE(num) (((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
+#define STMMAC_RX_DMA_ATTR (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
+int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
+ u16 queue);
+int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
+ struct netlink_ext_ack *extack);
+
+#endif /* _STMMAC_XDP_H_ */