author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/ti
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/ti')
-rw-r--r--  drivers/net/ethernet/ti/Kconfig | 186
-rw-r--r--  drivers/net/ethernet/ti/Makefile | 30
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-ethtool.c | 746
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.c | 2875
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-nuss.h | 193
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.c | 799
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-qos.h | 37
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-switchdev.c | 534
-rw-r--r--  drivers/net/ethernet/ti/am65-cpsw-switchdev.h | 34
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.c | 1060
-rw-r--r--  drivers/net/ethernet/ti/am65-cpts.h | 79
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c | 1251
-rw-r--r--  drivers/net/ethernet/ti/cpsw-common.c | 94
-rw-r--r--  drivers/net/ethernet/ti/cpsw-phy-sel.c | 244
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 1802
-rw-r--r--  drivers/net/ethernet/ti/cpsw.h | 26
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 1472
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 143
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ethtool.c | 752
-rw-r--r--  drivers/net/ethernet/ti/cpsw_new.c | 2120
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.c | 1582
-rw-r--r--  drivers/net/ethernet/ti/cpsw_priv.h | 514
-rw-r--r--  drivers/net/ethernet/ti/cpsw_sl.c | 328
-rw-r--r--  drivers/net/ethernet/ti/cpsw_sl.h | 73
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.c | 544
-rw-r--r--  drivers/net/ethernet/ti/cpsw_switchdev.h | 15
-rw-r--r--  drivers/net/ethernet/ti/cpts.c | 819
-rw-r--r--  drivers/net/ethernet/ti/cpts.h | 196
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c | 1444
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.h | 122
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c | 2109
-rw-r--r--  drivers/net/ethernet/ti/davinci_mdio.c | 750
-rw-r--r--  drivers/net/ethernet/ti/k3-cppi-desc-pool.c | 126
-rw-r--r--  drivers/net/ethernet/ti/k3-cppi-desc-pool.h | 30
-rw-r--r--  drivers/net/ethernet/ti/netcp.h | 248
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c | 2280
-rw-r--r--  drivers/net/ethernet/ti/netcp_ethss.c | 3878
-rw-r--r--  drivers/net/ethernet/ti/netcp_sgmii.c | 150
-rw-r--r--  drivers/net/ethernet/ti/netcp_xgbepcsr.c | 494
-rw-r--r--  drivers/net/ethernet/ti/tlan.c | 3277
-rw-r--r--  drivers/net/ethernet/ti/tlan.h | 544
41 files changed, 34000 insertions, 0 deletions
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
new file mode 100644
index 000000000..fce06663e
--- /dev/null
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -0,0 +1,186 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# TI device configuration
+#
+
+config NET_VENDOR_TI
+ bool "Texas Instruments (TI) devices"
+ default y
+ depends on PCI || EISA || AR7 || ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3
+ help
+ If you have a network (Ethernet) card belonging to this class, say Y.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about TI devices. If you say Y, you will be asked for
+ your specific card in the following questions.
+
+if NET_VENDOR_TI
+
+config TI_DAVINCI_EMAC
+ tristate "TI DaVinci EMAC Support"
+ depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 ) || COMPILE_TEST
+ select TI_DAVINCI_MDIO
+ select PHYLIB
+ select GENERIC_ALLOCATOR
+ help
+ This driver supports TI's DaVinci Ethernet controller.
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti_davinci_emac. This is recommended.
+
+config TI_DAVINCI_MDIO
+ tristate "TI DaVinci MDIO Support"
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST
+ select PHYLIB
+ select MDIO_BITBANG
+ help
+ This driver supports TI's DaVinci MDIO module.
+
+ To compile this driver as a module, choose M here: the module
+ will be called davinci_mdio. This is recommended.
+
+config TI_CPSW_PHY_SEL
+ bool "TI CPSW Phy mode Selection (DEPRECATED)"
+ default n
+ help
+ This driver supports configuring the PHY interface mode of the PHYs
+ connected to the CPSW. DEPRECATED: use PHY_TI_GMII_SEL instead.
+
+config TI_CPSW
+ tristate "TI CPSW Switch Support"
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on TI_CPTS || !TI_CPTS
+ select TI_DAVINCI_MDIO
+ select MFD_SYSCON
+ select PAGE_POOL
+ select REGMAP
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI's CPSW Ethernet Switch.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cpsw.
+
+config TI_CPSW_SWITCHDEV
+ tristate "TI CPSW Switch Support with switchdev"
+ depends on ARCH_DAVINCI || ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on NET_SWITCHDEV
+ depends on TI_CPTS || !TI_CPTS
+ select PAGE_POOL
+ select TI_DAVINCI_MDIO
+ select MFD_SYSCON
+ select REGMAP
+ select NET_DEVLINK
+ imply PHY_TI_GMII_SEL
+ help
+ This driver supports TI's CPSW Ethernet Switch.
+
+ To compile this driver as a module, choose M here: the module
+ will be called cpsw_new.
+
+config TI_CPTS
+ tristate "TI Common Platform Time Sync (CPTS) Support"
+ depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || COMPILE_TEST
+ depends on COMMON_CLK
+ depends on PTP_1588_CLOCK
+ help
+ This driver supports the Common Platform Time Sync unit of
+ the CPSW Ethernet Switch and Keystone 2 1g/10g Switch Subsystem.
+ The unit can time stamp PTP UDP/IPv4 and Layer 2 packets, and the
+ driver offers a PTP Hardware Clock.
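The PTP Hardware Clock registered by such a CPTS driver appears as a /dev/ptpN character device. For illustration only (not part of this patch, and the device path is just an assumption), a minimal userspace sketch that reads it:

    #include <fcntl.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define CLOCKFD			3
    #define FD_TO_CLOCKID(fd)	((~(clockid_t)(fd) << 3) | CLOCKFD)

    int main(void)
    {
    	struct timespec ts;
    	int fd = open("/dev/ptp0", O_RDONLY);	/* PHC registered by the CPTS driver */

    	if (fd < 0)
    		return 1;
    	if (clock_gettime(FD_TO_CLOCKID(fd), &ts) == 0)
    		printf("PHC time: %lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
    	close(fd);
    	return 0;
    }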
+
+config TI_K3_AM65_CPSW_NUSS
+ tristate "TI K3 AM654x/J721E CPSW Ethernet driver"
+ depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER
+ select NET_DEVLINK
+ select TI_DAVINCI_MDIO
+ select PHYLINK
+ imply PHY_TI_GMII_SEL
+ depends on TI_K3_AM65_CPTS || !TI_K3_AM65_CPTS
+ help
+ This driver supports TI K3 AM654/J721E CPSW2G Ethernet SubSystem.
+ The two-port Gigabit Ethernet MAC (MCU_CPSW0) subsystem provides
+ Ethernet packet communication for the device: One Ethernet port
+ (port 1) with selectable RGMII and RMII interfaces and an internal
+ Communications Port Programming Interface (CPPI) port (port 0).
+
+ To compile this driver as a module, choose M here: the module
+ will be called ti-am65-cpsw-nuss.
+
+config TI_K3_AM65_CPSW_SWITCHDEV
+ bool "TI K3 AM654x/J721E CPSW Switch mode support"
+ depends on TI_K3_AM65_CPSW_NUSS
+ depends on NET_SWITCHDEV
+ help
+ This enables switchdev support for the TI K3 CPSWxG Ethernet
+ Switch. Enable this option to add hardware switch support to the
+ AM65 CPSW NUSS driver.
+
+config TI_K3_AM65_CPTS
+ tristate "TI K3 AM65x CPTS"
+ depends on ARCH_K3 && OF
+ depends on PTP_1588_CLOCK
+ help
+ Say Y here to support the TI K3 AM65x CPTS with IEEE 1588 features
+ such as a PTP hardware clock for each CPTS device and network packet
+ timestamping where applicable.
+ Depending on the integration, CPTS blocks enable compliance with
+ the IEEE 1588-2008 standard for a precision clock synchronization
+ protocol, Ethernet Enhanced Scheduled Traffic Operations (CPTS_ESTFn)
+ and PCIe Subsystem Precision Time Measurement (PTM).
+
+config TI_AM65_CPSW_TAS
+ bool "Enable TAS offload in AM65 CPSW"
+ depends on TI_K3_AM65_CPSW_NUSS && NET_SCH_TAPRIO && TI_K3_AM65_CPTS
+ help
+ Say Y here to support Time Aware Shaper (TAS) offload in AM65 CPSW.
+ AM65 CPSW hardware supports Enhanced Scheduled Traffic (EST)
+ as defined in IEEE 802.1Q-2018. The EST scheduler runs on CPTS and the
+ TAS/EST schedule is updated in the Fetch RAM memory of the CPSW.
+
+config TI_KEYSTONE_NETCP
+ tristate "TI Keystone NETCP Core Support"
+ select TI_DAVINCI_MDIO
+ depends on OF
+ depends on KEYSTONE_NAVIGATOR_DMA && KEYSTONE_NAVIGATOR_QMSS
+ depends on TI_CPTS || !TI_CPTS
+ help
+ This driver supports TI's Keystone NETCP Core.
+
+ To compile this driver as a module, choose M here: the module
+ will be called keystone_netcp.
+
+config TI_KEYSTONE_NETCP_ETHSS
+ depends on TI_KEYSTONE_NETCP
+ tristate "TI Keystone NETCP Ethernet subsystem Support"
+ help
+ This driver supports TI's Keystone NETCP Ethernet subsystem.
+
+ To compile this driver as a module, choose M here: the module
+ will be called keystone_netcp_ethss.
+
+config TLAN
+ tristate "TI ThunderLAN support"
+ depends on (PCI || EISA)
+ help
+ If you have a PCI Ethernet network card based on the ThunderLAN chip
+ which is supported by this driver, say Y here.
+
+ Devices currently supported by this driver are Compaq Netelligent,
+ Compaq NetFlex and Olicom cards. Please read the file
+ <file:Documentation/networking/device_drivers/ethernet/ti/tlan.rst>
+ for more details.
+
+ To compile this driver as a module, choose M here. The module
+ will be called tlan.
+
+ Please email feedback to <torben.mathiasen@compaq.com>.
+
+config CPMAC
+ tristate "TI AR7 CPMAC Ethernet support"
+ depends on AR7
+ select PHYLIB
+ help
+ This driver supports the TI AR7 CPMAC Ethernet controller.
+
+endif # NET_VENDOR_TI
diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile
new file mode 100644
index 000000000..75f761efb
--- /dev/null
+++ b/drivers/net/ethernet/ti/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the TI network device drivers.
+#
+
+obj-$(CONFIG_TI_CPSW) += cpsw-common.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += cpsw-common.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += cpsw-common.o
+
+obj-$(CONFIG_TLAN) += tlan.o
+obj-$(CONFIG_CPMAC) += cpmac.o
+obj-$(CONFIG_TI_DAVINCI_EMAC) += ti_davinci_emac.o
+ti_davinci_emac-y := davinci_emac.o davinci_cpdma.o
+obj-$(CONFIG_TI_DAVINCI_MDIO) += davinci_mdio.o
+obj-$(CONFIG_TI_CPSW_PHY_SEL) += cpsw-phy-sel.o
+obj-$(CONFIG_TI_CPTS) += cpts.o
+obj-$(CONFIG_TI_CPSW) += ti_cpsw.o
+ti_cpsw-y := cpsw.o davinci_cpdma.o cpsw_ale.o cpsw_priv.o cpsw_sl.o cpsw_ethtool.o
+obj-$(CONFIG_TI_CPSW_SWITCHDEV) += ti_cpsw_new.o
+ti_cpsw_new-y := cpsw_switchdev.o cpsw_new.o davinci_cpdma.o cpsw_ale.o cpsw_sl.o cpsw_priv.o cpsw_ethtool.o
+
+obj-$(CONFIG_TI_KEYSTONE_NETCP) += keystone_netcp.o
+keystone_netcp-y := netcp_core.o cpsw_ale.o
+obj-$(CONFIG_TI_KEYSTONE_NETCP_ETHSS) += keystone_netcp_ethss.o
+keystone_netcp_ethss-y := netcp_ethss.o netcp_sgmii.o netcp_xgbepcsr.o cpsw_ale.o
+
+obj-$(CONFIG_TI_K3_AM65_CPSW_NUSS) += ti-am65-cpsw-nuss.o
+ti-am65-cpsw-nuss-y := am65-cpsw-nuss.o cpsw_sl.o am65-cpsw-ethtool.o cpsw_ale.o k3-cppi-desc-pool.o am65-cpsw-qos.o
+ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o
+obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o
diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
new file mode 100644
index 000000000..c51e2af91
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c
@@ -0,0 +1,746 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver ethtool ops
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/net_tstamp.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "am65-cpsw-nuss.h"
+#include "cpsw_ale.h"
+#include "am65-cpts.h"
+
+#define AM65_CPSW_REGDUMP_VER 0x1
+
+enum {
+ AM65_CPSW_REGDUMP_MOD_NUSS = 1,
+ AM65_CPSW_REGDUMP_MOD_RGMII_STATUS = 2,
+ AM65_CPSW_REGDUMP_MOD_MDIO = 3,
+ AM65_CPSW_REGDUMP_MOD_CPSW = 4,
+ AM65_CPSW_REGDUMP_MOD_CPSW_P0 = 5,
+ AM65_CPSW_REGDUMP_MOD_CPSW_P1 = 6,
+ AM65_CPSW_REGDUMP_MOD_CPSW_CPTS = 7,
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE = 8,
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL = 9,
+ AM65_CPSW_REGDUMP_MOD_LAST,
+};
+
+/**
+ * struct am65_cpsw_regdump_hdr - regdump record header
+ *
+ * @module_id: CPSW module ID
+ * @len: CPSW module register dump length, in bytes (including this header)
+ */
+
+struct am65_cpsw_regdump_hdr {
+ u32 module_id;
+ u32 len;
+};
+
+/**
+ * struct am65_cpsw_regdump_item - regdump module description
+ *
+ * @hdr: CPSW module header
+ * @start_ofs: CPSW module registers start addr
+ * @end_ofs: CPSW module registers end addr
+ *
+ * Registers dump provided in the format:
+ * u32 : module ID
+ * u32 : dump length
+ * u32[..len]: registers values
+ */
+struct am65_cpsw_regdump_item {
+ struct am65_cpsw_regdump_hdr hdr;
+ u32 start_ofs;
+ u32 end_ofs;
+};
+
+#define AM65_CPSW_REGDUMP_REC(mod, start, end) { \
+ .hdr.module_id = (mod), \
+ .hdr.len = (end + 4 - start) * 2 + \
+ sizeof(struct am65_cpsw_regdump_hdr), \
+ .start_ofs = (start), \
+ .end_ofs = end, \
+}
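To make the arithmetic concrete (an illustrative note, not text from the upstream file), the length computed by AM65_CPSW_REGDUMP_REC() works out as follows for the first record in the table below:

    /* AM65_CPSW_REGDUMP_MOD_NUSS covers offsets 0x0..0x1c, i.e.
     * (0x1c + 4 - 0x0) / 4 = 8 registers. Each register is emitted as an
     * (offset, value) pair of u32 words by am65_cpsw_get_regs(), so:
     *   payload = (0x1c + 4 - 0x0) * 2                      = 64 bytes
     *   hdr.len = 64 + sizeof(struct am65_cpsw_regdump_hdr) = 64 + 8 = 72 bytes
     */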
+
+static const struct am65_cpsw_regdump_item am65_cpsw_regdump[] = {
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_NUSS, 0x0, 0x1c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_RGMII_STATUS, 0x30, 0x4c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_MDIO, 0xf00, 0xffc),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW, 0x20000, 0x2011c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_P0, 0x21000, 0x21320),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_P1, 0x22000, 0x223a4),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_CPTS,
+ 0x3d000, 0x3d048),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_ALE, 0x3e000, 0x3e13c),
+ AM65_CPSW_REGDUMP_REC(AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL, 0, 0),
+};
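For illustration only (not part of this patch): a userspace tool that obtained the raw blob produced by this driver's get_regs (for example via ethtool's register-dump interface) could walk the records roughly as follows. The helper below is hypothetical and assumes the record layout described in the comments above.

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical walker for the (module_id, len, payload) records. len is in
     * bytes and includes the 8-byte header; for every module except the ALE
     * table (module_id 9) the payload is (offset, value) pairs of u32 words.
     */
    static void am65_regdump_walk(const uint32_t *buf, size_t len_bytes)
    {
    	size_t words = len_bytes / sizeof(uint32_t);
    	size_t pos = 0;

    	while (pos + 2 <= words) {
    		uint32_t module_id = buf[pos];
    		uint32_t rec_len = buf[pos + 1];
    		size_t payload = (rec_len - 2 * sizeof(uint32_t)) / sizeof(uint32_t);
    		size_t i;

    		if (pos + 2 + payload > words)
    			break;	/* truncated dump */

    		printf("module %u: %u bytes\n", (unsigned)module_id, (unsigned)rec_len);
    		if (module_id != 9)	/* ALE table carries raw entry words */
    			for (i = 0; i + 1 < payload; i += 2)
    				printf("  0x%05x: 0x%08x\n",
    				       (unsigned)buf[pos + 2 + i],
    				       (unsigned)buf[pos + 3 + i]);
    		pos += 2 + payload;
    	}
    }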
+
+struct am65_cpsw_stats_regs {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames; /* slave */
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors; /* slave */
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames; /* slave */
+ u32 rx_undersized_frames;
+ u32 rx_fragments; /* slave */
+ u32 ale_drop;
+ u32 ale_overrun_drop;
+ u32 rx_octets;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames; /* slave */
+ u32 tx_deferred_frames; /* slave */
+ u32 tx_collision_frames; /* slave */
+ u32 tx_single_coll_frames; /* slave */
+ u32 tx_mult_coll_frames; /* slave */
+ u32 tx_excessive_collisions; /* slave */
+ u32 tx_late_collisions; /* slave */
+ u32 rx_ipg_error; /* slave 10G only */
+ u32 tx_carrier_sense_errors; /* slave */
+ u32 tx_octets;
+ u32 tx_64B_frames;
+ u32 tx_65_to_127B_frames;
+ u32 tx_128_to_255B_frames;
+ u32 tx_256_to_511B_frames;
+ u32 tx_512_to_1023B_frames;
+ u32 tx_1024B_frames;
+ u32 net_octets;
+ u32 rx_bottom_fifo_drop;
+ u32 rx_port_mask_drop;
+ u32 rx_top_fifo_drop;
+ u32 ale_rate_limit_drop;
+ u32 ale_vid_ingress_drop;
+ u32 ale_da_eq_sa_drop;
+ u32 ale_block_drop; /* K3 */
+ u32 ale_secure_drop; /* K3 */
+ u32 ale_auth_drop; /* K3 */
+ u32 ale_unknown_ucast;
+ u32 ale_unknown_ucast_bytes;
+ u32 ale_unknown_mcast;
+ u32 ale_unknown_mcast_bytes;
+ u32 ale_unknown_bcast;
+ u32 ale_unknown_bcast_bytes;
+ u32 ale_pol_match;
+ u32 ale_pol_match_red;
+ u32 ale_pol_match_yellow;
+ u32 ale_mcast_sa_drop; /* K3 */
+ u32 ale_dual_vlan_drop; /* K3 */
+ u32 ale_len_err_drop; /* K3 */
+ u32 ale_ip_next_hdr_drop; /* K3 */
+ u32 ale_ipv4_frag_drop; /* K3 */
+ u32 __rsvd_1[24];
+ u32 iet_rx_assembly_err; /* K3 slave */
+ u32 iet_rx_assembly_ok; /* K3 slave */
+ u32 iet_rx_smd_err; /* K3 slave */
+ u32 iet_rx_frag; /* K3 slave */
+ u32 iet_tx_hold; /* K3 slave */
+ u32 iet_tx_frag; /* K3 slave */
+ u32 __rsvd_2[9];
+ u32 tx_mem_protect_err;
+ /* following NU only */
+ u32 tx_pri0;
+ u32 tx_pri1;
+ u32 tx_pri2;
+ u32 tx_pri3;
+ u32 tx_pri4;
+ u32 tx_pri5;
+ u32 tx_pri6;
+ u32 tx_pri7;
+ u32 tx_pri0_bcnt;
+ u32 tx_pri1_bcnt;
+ u32 tx_pri2_bcnt;
+ u32 tx_pri3_bcnt;
+ u32 tx_pri4_bcnt;
+ u32 tx_pri5_bcnt;
+ u32 tx_pri6_bcnt;
+ u32 tx_pri7_bcnt;
+ u32 tx_pri0_drop;
+ u32 tx_pri1_drop;
+ u32 tx_pri2_drop;
+ u32 tx_pri3_drop;
+ u32 tx_pri4_drop;
+ u32 tx_pri5_drop;
+ u32 tx_pri6_drop;
+ u32 tx_pri7_drop;
+ u32 tx_pri0_drop_bcnt;
+ u32 tx_pri1_drop_bcnt;
+ u32 tx_pri2_drop_bcnt;
+ u32 tx_pri3_drop_bcnt;
+ u32 tx_pri4_drop_bcnt;
+ u32 tx_pri5_drop_bcnt;
+ u32 tx_pri6_drop_bcnt;
+ u32 tx_pri7_drop_bcnt;
+};
+
+struct am65_cpsw_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int offset;
+};
+
+#define AM65_CPSW_STATS(prefix, field) \
+{ \
+ #prefix#field, \
+ offsetof(struct am65_cpsw_stats_regs, field) \
+}
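The macro glues the prefix to the field name via string-literal concatenation and records the field's offset; for example (illustrative expansion, not part of the upstream file):

    /* AM65_CPSW_STATS(p0_, rx_good_frames) expands to
     *   { "p0_rx_good_frames",
     *     offsetof(struct am65_cpsw_stats_regs, rx_good_frames) },
     * and am65_cpsw_get_ethtool_stats() later reads the counter at
     * stat_base + that offset.
     */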
+
+static const struct am65_cpsw_ethtool_stat am65_host_stats[] = {
+ AM65_CPSW_STATS(p0_, rx_good_frames),
+ AM65_CPSW_STATS(p0_, rx_broadcast_frames),
+ AM65_CPSW_STATS(p0_, rx_multicast_frames),
+ AM65_CPSW_STATS(p0_, rx_crc_errors),
+ AM65_CPSW_STATS(p0_, rx_oversized_frames),
+ AM65_CPSW_STATS(p0_, rx_undersized_frames),
+ AM65_CPSW_STATS(p0_, ale_drop),
+ AM65_CPSW_STATS(p0_, ale_overrun_drop),
+ AM65_CPSW_STATS(p0_, rx_octets),
+ AM65_CPSW_STATS(p0_, tx_good_frames),
+ AM65_CPSW_STATS(p0_, tx_broadcast_frames),
+ AM65_CPSW_STATS(p0_, tx_multicast_frames),
+ AM65_CPSW_STATS(p0_, tx_octets),
+ AM65_CPSW_STATS(p0_, tx_64B_frames),
+ AM65_CPSW_STATS(p0_, tx_65_to_127B_frames),
+ AM65_CPSW_STATS(p0_, tx_128_to_255B_frames),
+ AM65_CPSW_STATS(p0_, tx_256_to_511B_frames),
+ AM65_CPSW_STATS(p0_, tx_512_to_1023B_frames),
+ AM65_CPSW_STATS(p0_, tx_1024B_frames),
+ AM65_CPSW_STATS(p0_, net_octets),
+ AM65_CPSW_STATS(p0_, rx_bottom_fifo_drop),
+ AM65_CPSW_STATS(p0_, rx_port_mask_drop),
+ AM65_CPSW_STATS(p0_, rx_top_fifo_drop),
+ AM65_CPSW_STATS(p0_, ale_rate_limit_drop),
+ AM65_CPSW_STATS(p0_, ale_vid_ingress_drop),
+ AM65_CPSW_STATS(p0_, ale_da_eq_sa_drop),
+ AM65_CPSW_STATS(p0_, ale_block_drop),
+ AM65_CPSW_STATS(p0_, ale_secure_drop),
+ AM65_CPSW_STATS(p0_, ale_auth_drop),
+ AM65_CPSW_STATS(p0_, ale_unknown_ucast),
+ AM65_CPSW_STATS(p0_, ale_unknown_ucast_bytes),
+ AM65_CPSW_STATS(p0_, ale_unknown_mcast),
+ AM65_CPSW_STATS(p0_, ale_unknown_mcast_bytes),
+ AM65_CPSW_STATS(p0_, ale_unknown_bcast),
+ AM65_CPSW_STATS(p0_, ale_unknown_bcast_bytes),
+ AM65_CPSW_STATS(p0_, ale_pol_match),
+ AM65_CPSW_STATS(p0_, ale_pol_match_red),
+ AM65_CPSW_STATS(p0_, ale_pol_match_yellow),
+ AM65_CPSW_STATS(p0_, ale_mcast_sa_drop),
+ AM65_CPSW_STATS(p0_, ale_dual_vlan_drop),
+ AM65_CPSW_STATS(p0_, ale_len_err_drop),
+ AM65_CPSW_STATS(p0_, ale_ip_next_hdr_drop),
+ AM65_CPSW_STATS(p0_, ale_ipv4_frag_drop),
+ AM65_CPSW_STATS(p0_, tx_mem_protect_err),
+ AM65_CPSW_STATS(p0_, tx_pri0),
+ AM65_CPSW_STATS(p0_, tx_pri1),
+ AM65_CPSW_STATS(p0_, tx_pri2),
+ AM65_CPSW_STATS(p0_, tx_pri3),
+ AM65_CPSW_STATS(p0_, tx_pri4),
+ AM65_CPSW_STATS(p0_, tx_pri5),
+ AM65_CPSW_STATS(p0_, tx_pri6),
+ AM65_CPSW_STATS(p0_, tx_pri7),
+ AM65_CPSW_STATS(p0_, tx_pri0_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri1_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri2_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri3_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri4_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri5_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri6_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri7_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri0_drop),
+ AM65_CPSW_STATS(p0_, tx_pri1_drop),
+ AM65_CPSW_STATS(p0_, tx_pri2_drop),
+ AM65_CPSW_STATS(p0_, tx_pri3_drop),
+ AM65_CPSW_STATS(p0_, tx_pri4_drop),
+ AM65_CPSW_STATS(p0_, tx_pri5_drop),
+ AM65_CPSW_STATS(p0_, tx_pri6_drop),
+ AM65_CPSW_STATS(p0_, tx_pri7_drop),
+ AM65_CPSW_STATS(p0_, tx_pri0_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri1_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri2_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri3_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri4_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri5_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri6_drop_bcnt),
+ AM65_CPSW_STATS(p0_, tx_pri7_drop_bcnt),
+};
+
+static const struct am65_cpsw_ethtool_stat am65_slave_stats[] = {
+ AM65_CPSW_STATS(, rx_good_frames),
+ AM65_CPSW_STATS(, rx_broadcast_frames),
+ AM65_CPSW_STATS(, rx_multicast_frames),
+ AM65_CPSW_STATS(, rx_pause_frames),
+ AM65_CPSW_STATS(, rx_crc_errors),
+ AM65_CPSW_STATS(, rx_align_code_errors),
+ AM65_CPSW_STATS(, rx_oversized_frames),
+ AM65_CPSW_STATS(, rx_jabber_frames),
+ AM65_CPSW_STATS(, rx_undersized_frames),
+ AM65_CPSW_STATS(, rx_fragments),
+ AM65_CPSW_STATS(, ale_drop),
+ AM65_CPSW_STATS(, ale_overrun_drop),
+ AM65_CPSW_STATS(, rx_octets),
+ AM65_CPSW_STATS(, tx_good_frames),
+ AM65_CPSW_STATS(, tx_broadcast_frames),
+ AM65_CPSW_STATS(, tx_multicast_frames),
+ AM65_CPSW_STATS(, tx_pause_frames),
+ AM65_CPSW_STATS(, tx_deferred_frames),
+ AM65_CPSW_STATS(, tx_collision_frames),
+ AM65_CPSW_STATS(, tx_single_coll_frames),
+ AM65_CPSW_STATS(, tx_mult_coll_frames),
+ AM65_CPSW_STATS(, tx_excessive_collisions),
+ AM65_CPSW_STATS(, tx_late_collisions),
+ AM65_CPSW_STATS(, rx_ipg_error),
+ AM65_CPSW_STATS(, tx_carrier_sense_errors),
+ AM65_CPSW_STATS(, tx_octets),
+ AM65_CPSW_STATS(, tx_64B_frames),
+ AM65_CPSW_STATS(, tx_65_to_127B_frames),
+ AM65_CPSW_STATS(, tx_128_to_255B_frames),
+ AM65_CPSW_STATS(, tx_256_to_511B_frames),
+ AM65_CPSW_STATS(, tx_512_to_1023B_frames),
+ AM65_CPSW_STATS(, tx_1024B_frames),
+ AM65_CPSW_STATS(, net_octets),
+ AM65_CPSW_STATS(, rx_bottom_fifo_drop),
+ AM65_CPSW_STATS(, rx_port_mask_drop),
+ AM65_CPSW_STATS(, rx_top_fifo_drop),
+ AM65_CPSW_STATS(, ale_rate_limit_drop),
+ AM65_CPSW_STATS(, ale_vid_ingress_drop),
+ AM65_CPSW_STATS(, ale_da_eq_sa_drop),
+ AM65_CPSW_STATS(, ale_block_drop),
+ AM65_CPSW_STATS(, ale_secure_drop),
+ AM65_CPSW_STATS(, ale_auth_drop),
+ AM65_CPSW_STATS(, ale_unknown_ucast),
+ AM65_CPSW_STATS(, ale_unknown_ucast_bytes),
+ AM65_CPSW_STATS(, ale_unknown_mcast),
+ AM65_CPSW_STATS(, ale_unknown_mcast_bytes),
+ AM65_CPSW_STATS(, ale_unknown_bcast),
+ AM65_CPSW_STATS(, ale_unknown_bcast_bytes),
+ AM65_CPSW_STATS(, ale_pol_match),
+ AM65_CPSW_STATS(, ale_pol_match_red),
+ AM65_CPSW_STATS(, ale_pol_match_yellow),
+ AM65_CPSW_STATS(, ale_mcast_sa_drop),
+ AM65_CPSW_STATS(, ale_dual_vlan_drop),
+ AM65_CPSW_STATS(, ale_len_err_drop),
+ AM65_CPSW_STATS(, ale_ip_next_hdr_drop),
+ AM65_CPSW_STATS(, ale_ipv4_frag_drop),
+ AM65_CPSW_STATS(, iet_rx_assembly_err),
+ AM65_CPSW_STATS(, iet_rx_assembly_ok),
+ AM65_CPSW_STATS(, iet_rx_smd_err),
+ AM65_CPSW_STATS(, iet_rx_frag),
+ AM65_CPSW_STATS(, iet_tx_hold),
+ AM65_CPSW_STATS(, iet_tx_frag),
+ AM65_CPSW_STATS(, tx_mem_protect_err),
+ AM65_CPSW_STATS(, tx_pri0),
+ AM65_CPSW_STATS(, tx_pri1),
+ AM65_CPSW_STATS(, tx_pri2),
+ AM65_CPSW_STATS(, tx_pri3),
+ AM65_CPSW_STATS(, tx_pri4),
+ AM65_CPSW_STATS(, tx_pri5),
+ AM65_CPSW_STATS(, tx_pri6),
+ AM65_CPSW_STATS(, tx_pri7),
+ AM65_CPSW_STATS(, tx_pri0_bcnt),
+ AM65_CPSW_STATS(, tx_pri1_bcnt),
+ AM65_CPSW_STATS(, tx_pri2_bcnt),
+ AM65_CPSW_STATS(, tx_pri3_bcnt),
+ AM65_CPSW_STATS(, tx_pri4_bcnt),
+ AM65_CPSW_STATS(, tx_pri5_bcnt),
+ AM65_CPSW_STATS(, tx_pri6_bcnt),
+ AM65_CPSW_STATS(, tx_pri7_bcnt),
+ AM65_CPSW_STATS(, tx_pri0_drop),
+ AM65_CPSW_STATS(, tx_pri1_drop),
+ AM65_CPSW_STATS(, tx_pri2_drop),
+ AM65_CPSW_STATS(, tx_pri3_drop),
+ AM65_CPSW_STATS(, tx_pri4_drop),
+ AM65_CPSW_STATS(, tx_pri5_drop),
+ AM65_CPSW_STATS(, tx_pri6_drop),
+ AM65_CPSW_STATS(, tx_pri7_drop),
+ AM65_CPSW_STATS(, tx_pri0_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri1_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri2_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri3_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri4_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri5_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri6_drop_bcnt),
+ AM65_CPSW_STATS(, tx_pri7_drop_bcnt),
+};
+
+/* Ethtool priv_flags */
+static const char am65_cpsw_ethtool_priv_flags[][ETH_GSTRING_LEN] = {
+#define AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN BIT(0)
+ "p0-rx-ptype-rrobin",
+};
+
+static int am65_cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
+ dev_err(common->dev, "ethtool begin failed %d\n", ret);
+
+ return ret;
+}
+
+static void am65_cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int ret;
+
+ ret = pm_runtime_put(common->dev);
+ if (ret < 0 && ret != -EBUSY)
+ dev_err(common->dev, "ethtool complete failed %d\n", ret);
+}
+
+static void am65_cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ strscpy(info->driver, dev_driver_string(common->dev),
+ sizeof(info->driver));
+ strscpy(info->bus_info, dev_name(common->dev), sizeof(info->bus_info));
+}
+
+static u32 am65_cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+static void am65_cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+static void am65_cpsw_get_channels(struct net_device *ndev,
+ struct ethtool_channels *ch)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ ch->max_rx = AM65_CPSW_MAX_RX_QUEUES;
+ ch->max_tx = AM65_CPSW_MAX_TX_QUEUES;
+ ch->rx_count = AM65_CPSW_MAX_RX_QUEUES;
+ ch->tx_count = common->tx_ch_num;
+}
+
+static int am65_cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!chs->rx_count || !chs->tx_count)
+ return -EINVAL;
+
+ /* Check if interface is up. Can change the num queues when
+ * the interface is down.
+ */
+ if (common->usage_count)
+ return -EBUSY;
+
+ am65_cpsw_nuss_remove_tx_chns(common);
+
+ return am65_cpsw_nuss_update_tx_chns(common, chs->tx_count);
+}
+
+static void
+am65_cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ /* changing the ring sizes is not supported; report current values only */
+ ering->tx_pending = common->tx_chns[0].descs_num;
+ ering->rx_pending = common->rx_chns.descs_num;
+}
+
+static void am65_cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ phylink_ethtool_get_pauseparam(slave->phylink, pause);
+}
+
+static int am65_cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_set_pauseparam(slave->phylink, pause);
+}
+
+static void am65_cpsw_get_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ phylink_ethtool_get_wol(slave->phylink, wol);
+}
+
+static int am65_cpsw_set_wol(struct net_device *ndev,
+ struct ethtool_wolinfo *wol)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_set_wol(slave->phylink, wol);
+}
+
+static int am65_cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_ksettings_get(slave->phylink, ecmd);
+}
+
+static int
+am65_cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_ksettings_set(slave->phylink, ecmd);
+}
+
+static int am65_cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_get_eee(slave->phylink, edata);
+}
+
+static int am65_cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_set_eee(slave->phylink, edata);
+}
+
+static int am65_cpsw_nway_reset(struct net_device *ndev)
+{
+ struct am65_cpsw_slave_data *slave = am65_ndev_to_slave(ndev);
+
+ return phylink_ethtool_nway_reset(slave->phylink);
+}
+
+static int am65_cpsw_get_regs_len(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 ale_entries, i, regdump_len = 0;
+
+ ale_entries = cpsw_ale_get_num_entries(common->ale);
+ for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
+ if (am65_cpsw_regdump[i].hdr.module_id ==
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
+ regdump_len += sizeof(struct am65_cpsw_regdump_hdr);
+ regdump_len += ale_entries *
+ ALE_ENTRY_WORDS * sizeof(u32);
+ continue;
+ }
+ regdump_len += am65_cpsw_regdump[i].hdr.len;
+ }
+
+ return regdump_len;
+}
+
+static void am65_cpsw_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 ale_entries, i, j, pos, *reg = p;
+
+ /* update CPSW IP version */
+ regs->version = AM65_CPSW_REGDUMP_VER;
+ ale_entries = cpsw_ale_get_num_entries(common->ale);
+
+ pos = 0;
+ for (i = 0; i < ARRAY_SIZE(am65_cpsw_regdump); i++) {
+ reg[pos++] = am65_cpsw_regdump[i].hdr.module_id;
+
+ if (am65_cpsw_regdump[i].hdr.module_id ==
+ AM65_CPSW_REGDUMP_MOD_CPSW_ALE_TBL) {
+ u32 ale_tbl_len = ale_entries *
+ ALE_ENTRY_WORDS * sizeof(u32) +
+ sizeof(struct am65_cpsw_regdump_hdr);
+ reg[pos++] = ale_tbl_len;
+ cpsw_ale_dump(common->ale, &reg[pos]);
+ pos += ale_tbl_len;
+ continue;
+ }
+
+ reg[pos++] = am65_cpsw_regdump[i].hdr.len;
+
+ j = am65_cpsw_regdump[i].start_ofs;
+ do {
+ reg[pos++] = j;
+ reg[pos++] = readl_relaxed(common->ss_base + j);
+ j += sizeof(u32);
+ } while (j <= am65_cpsw_regdump[i].end_ofs);
+ }
+}
+
+static int am65_cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(am65_host_stats) +
+ ARRAY_SIZE(am65_slave_stats);
+ case ETH_SS_PRIV_FLAGS:
+ return ARRAY_SIZE(am65_cpsw_ethtool_priv_flags);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void am65_cpsw_get_strings(struct net_device *ndev,
+ u32 stringset, u8 *data)
+{
+ const struct am65_cpsw_ethtool_stat *hw_stats;
+ u32 i, num_stats;
+ u8 *p = data;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ num_stats = ARRAY_SIZE(am65_host_stats);
+ hw_stats = am65_host_stats;
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, hw_stats[i].desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ num_stats = ARRAY_SIZE(am65_slave_stats);
+ hw_stats = am65_slave_stats;
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, hw_stats[i].desc, ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_PRIV_FLAGS:
+ num_stats = ARRAY_SIZE(am65_cpsw_ethtool_priv_flags);
+
+ for (i = 0; i < num_stats; i++) {
+ memcpy(p, am65_cpsw_ethtool_priv_flags[i],
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+}
+
+static void am65_cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ const struct am65_cpsw_ethtool_stat *hw_stats;
+ struct am65_cpsw_host *host_p;
+ struct am65_cpsw_port *port;
+ u32 i, num_stats;
+
+ host_p = am65_common_get_host(common);
+ port = am65_ndev_to_port(ndev);
+ num_stats = ARRAY_SIZE(am65_host_stats);
+ hw_stats = am65_host_stats;
+ for (i = 0; i < num_stats; i++)
+ *data++ = readl_relaxed(host_p->stat_base +
+ hw_stats[i].offset);
+
+ num_stats = ARRAY_SIZE(am65_slave_stats);
+ hw_stats = am65_slave_stats;
+ for (i = 0; i < num_stats; i++)
+ *data++ = readl_relaxed(port->stat_base +
+ hw_stats[i].offset);
+}
+
+static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return ethtool_op_get_ts_info(ndev, info);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = am65_cpts_phc_index(common->cpts);
+ info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
+ info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL);
+ return 0;
+}
+
+static u32 am65_cpsw_get_ethtool_priv_flags(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ u32 priv_flags = 0;
+
+ if (common->pf_p0_rx_ptype_rrobin)
+ priv_flags |= AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN;
+
+ return priv_flags;
+}
+
+static int am65_cpsw_set_ethtool_priv_flags(struct net_device *ndev, u32 flags)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ int rrobin;
+
+ rrobin = !!(flags & AM65_CPSW_PRIV_P0_RX_PTYPE_RROBIN);
+
+ if (common->usage_count)
+ return -EBUSY;
+
+ if (common->est_enabled && rrobin) {
+ netdev_err(ndev,
+ "p0-rx-ptype-rrobin flag conflicts with QOS\n");
+ return -EINVAL;
+ }
+
+ common->pf_p0_rx_ptype_rrobin = rrobin;
+
+ return 0;
+}
+
+const struct ethtool_ops am65_cpsw_ethtool_ops_slave = {
+ .begin = am65_cpsw_ethtool_op_begin,
+ .complete = am65_cpsw_ethtool_op_complete,
+ .get_drvinfo = am65_cpsw_get_drvinfo,
+ .get_msglevel = am65_cpsw_get_msglevel,
+ .set_msglevel = am65_cpsw_set_msglevel,
+ .get_channels = am65_cpsw_get_channels,
+ .set_channels = am65_cpsw_set_channels,
+ .get_ringparam = am65_cpsw_get_ringparam,
+ .get_regs_len = am65_cpsw_get_regs_len,
+ .get_regs = am65_cpsw_get_regs,
+ .get_sset_count = am65_cpsw_get_sset_count,
+ .get_strings = am65_cpsw_get_strings,
+ .get_ethtool_stats = am65_cpsw_get_ethtool_stats,
+ .get_ts_info = am65_cpsw_get_ethtool_ts_info,
+ .get_priv_flags = am65_cpsw_get_ethtool_priv_flags,
+ .set_priv_flags = am65_cpsw_set_ethtool_priv_flags,
+
+ .get_link = ethtool_op_get_link,
+ .get_link_ksettings = am65_cpsw_get_link_ksettings,
+ .set_link_ksettings = am65_cpsw_set_link_ksettings,
+ .get_pauseparam = am65_cpsw_get_pauseparam,
+ .set_pauseparam = am65_cpsw_set_pauseparam,
+ .get_wol = am65_cpsw_get_wol,
+ .set_wol = am65_cpsw_set_wol,
+ .get_eee = am65_cpsw_get_eee,
+ .set_eee = am65_cpsw_set_eee,
+ .nway_reset = am65_cpsw_nway_reset,
+};
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
new file mode 100644
index 000000000..76fabeae5
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c
@@ -0,0 +1,2875 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet Switch SubSystem Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/phylink.h>
+#include <linux/phy/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/sys_soc.h>
+#include <linux/dma/ti-cppi5.h>
+#include <linux/dma/k3-udma-glue.h>
+#include <net/switchdev.h>
+
+#include "cpsw_ale.h"
+#include "cpsw_sl.h"
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
+#include "k3-cppi-desc-pool.h"
+#include "am65-cpts.h"
+
+#define AM65_CPSW_SS_BASE 0x0
+#define AM65_CPSW_SGMII_BASE 0x100
+#define AM65_CPSW_XGMII_BASE 0x2100
+#define AM65_CPSW_CPSW_NU_BASE 0x20000
+#define AM65_CPSW_NU_PORTS_BASE 0x1000
+#define AM65_CPSW_NU_FRAM_BASE 0x12000
+#define AM65_CPSW_NU_STATS_BASE 0x1a000
+#define AM65_CPSW_NU_ALE_BASE 0x1e000
+#define AM65_CPSW_NU_CPTS_BASE 0x1d000
+
+#define AM65_CPSW_NU_PORTS_OFFSET 0x1000
+#define AM65_CPSW_NU_STATS_PORT_OFFSET 0x200
+#define AM65_CPSW_NU_FRAM_PORT_OFFSET 0x200
+
+#define AM65_CPSW_MAX_PORTS 8
+
+#define AM65_CPSW_MIN_PACKET_SIZE VLAN_ETH_ZLEN
+#define AM65_CPSW_MAX_PACKET_SIZE 2024
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_REG_STAT_PORT_EN 0x014
+#define AM65_CPSW_REG_PTYPE 0x018
+
+#define AM65_CPSW_P0_REG_CTL 0x004
+#define AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET 0x008
+
+#define AM65_CPSW_PORT_REG_PRI_CTL 0x01c
+#define AM65_CPSW_PORT_REG_RX_PRI_MAP 0x020
+#define AM65_CPSW_PORT_REG_RX_MAXLEN 0x024
+
+#define AM65_CPSW_PORTN_REG_SA_L 0x308
+#define AM65_CPSW_PORTN_REG_SA_H 0x30c
+#define AM65_CPSW_PORTN_REG_TS_CTL 0x310
+#define AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG 0x314
+#define AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG 0x318
+#define AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 0x31C
+
+#define AM65_CPSW_SGMII_CONTROL_REG 0x010
+#define AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE BIT(0)
+
+#define AM65_CPSW_CTL_VLAN_AWARE BIT(1)
+#define AM65_CPSW_CTL_P0_ENABLE BIT(2)
+#define AM65_CPSW_CTL_P0_TX_CRC_REMOVE BIT(13)
+#define AM65_CPSW_CTL_P0_RX_PAD BIT(14)
+
+/* AM65_CPSW_P0_REG_CTL */
+#define AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN BIT(0)
+
+/* AM65_CPSW_PORT_REG_PRI_CTL */
+#define AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN BIT(8)
+
+/* AM65_CPSW_PN_TS_CTL register fields */
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN BIT(4)
+#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN BIT(5)
+#define AM65_CPSW_PN_TS_CTL_TX_VLAN_LT2_EN BIT(6)
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN BIT(7)
+#define AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN BIT(10)
+#define AM65_CPSW_PN_TS_CTL_TX_HOST_TS_EN BIT(11)
+#define AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT 16
+
+/* AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG register fields */
+#define AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT 16
+
+/* AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2 */
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 BIT(16)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 BIT(17)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 BIT(18)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 BIT(19)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 BIT(20)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 BIT(21)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 BIT(22)
+#define AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO BIT(23)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define AM65_CPSW_TS_EVENT_MSG_TYPE_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+
+#define AM65_CPSW_TS_SEQ_ID_OFFSET (0x1e)
+
+#define AM65_CPSW_TS_TX_ANX_ALL_EN \
+ (AM65_CPSW_PN_TS_CTL_TX_ANX_D_EN | \
+ AM65_CPSW_PN_TS_CTL_TX_ANX_E_EN | \
+ AM65_CPSW_PN_TS_CTL_TX_ANX_F_EN)
+
+#define AM65_CPSW_ALE_AGEOUT_DEFAULT 30
+/* Number of TX/RX descriptors */
+#define AM65_CPSW_MAX_TX_DESC 500
+#define AM65_CPSW_MAX_RX_DESC 500
+
+#define AM65_CPSW_NAV_PS_DATA_SIZE 16
+#define AM65_CPSW_NAV_SW_DATA_SIZE 16
+
+#define AM65_CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_PROBE | NETIF_MSG_IFDOWN | \
+ NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+static void am65_cpsw_port_set_sl_mac(struct am65_cpsw_port *slave,
+ const u8 *dev_addr)
+{
+ u32 mac_hi = (dev_addr[0] << 0) | (dev_addr[1] << 8) |
+ (dev_addr[2] << 16) | (dev_addr[3] << 24);
+ u32 mac_lo = (dev_addr[4] << 0) | (dev_addr[5] << 8);
+
+ writel(mac_hi, slave->port_base + AM65_CPSW_PORTN_REG_SA_H);
+ writel(mac_lo, slave->port_base + AM65_CPSW_PORTN_REG_SA_L);
+}
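A small worked example of the packing done above (illustrative, not from the patch):

    /* dev_addr = 00:11:22:33:44:55
     *   mac_hi = 0x33221100   (bytes 0..3, least significant byte first)
     *   mac_lo = 0x00005544   (bytes 4..5)
     */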
+
+static void am65_cpsw_sl_ctl_reset(struct am65_cpsw_port *port)
+{
+ cpsw_sl_reset(port->slave.mac_sl, 100);
+ /* Max length register has to be restored after MAC SL reset */
+ writel(AM65_CPSW_MAX_PACKET_SIZE,
+ port->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
+}
+
+static void am65_cpsw_nuss_get_ver(struct am65_cpsw_common *common)
+{
+ common->nuss_ver = readl(common->ss_base);
+ common->cpsw_ver = readl(common->cpsw_base);
+ dev_info(common->dev,
+ "initializing am65 cpsw nuss version 0x%08X, cpsw version 0x%08X Ports: %u quirks:%08x\n",
+ common->nuss_ver,
+ common->cpsw_ver,
+ common->port_num + 1,
+ common->pdata.quirks);
+}
+
+static int am65_cpsw_nuss_ndo_slave_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask, unreg_mcast = 0;
+ int ret;
+
+ if (!common->is_emac_mode)
+ return 0;
+
+ if (!netif_running(ndev) || !vid)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
+ return ret;
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+ if (!vid)
+ unreg_mcast = port_mask;
+ dev_info(common->dev, "Adding vlan %d to vlan filter\n", vid);
+ ret = cpsw_ale_vlan_add_modify(common->ale, vid, port_mask,
+ unreg_mcast, port_mask, 0);
+
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndo_slave_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ if (!common->is_emac_mode)
+ return 0;
+
+ if (!netif_running(ndev) || !vid)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
+ return ret;
+
+ dev_info(common->dev, "Removing vlan %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(common->ale, vid,
+ BIT(port->port_id) | ALE_PORT_HOST);
+
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static void am65_cpsw_slave_set_promisc(struct am65_cpsw_port *port,
+ bool promisc)
+{
+ struct am65_cpsw_common *common = port->common;
+
+ if (promisc && !common->is_emac_mode) {
+ dev_dbg(common->dev, "promisc mode requested in switch mode");
+ return;
+ }
+
+ if (promisc) {
+ /* Enable promiscuous mode */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY_CAF, 1);
+ dev_dbg(common->dev, "promisc enabled\n");
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY_CAF, 0);
+ dev_dbg(common->dev, "promisc disabled\n");
+ }
+}
+
+static void am65_cpsw_nuss_ndo_slave_set_rx_mode(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 port_mask;
+ bool promisc;
+
+ promisc = !!(ndev->flags & IFF_PROMISC);
+ am65_cpsw_slave_set_promisc(port, promisc);
+
+ if (promisc)
+ return;
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(common->ale,
+ ndev->flags & IFF_ALLMULTI, port->port_id);
+
+ port_mask = ALE_PORT_HOST;
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(common->ale, port_mask, -1);
+
+ if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ /* program multicast address list into ALE register */
+ netdev_for_each_mc_addr(ha, ndev) {
+ cpsw_ale_add_mcast(common->ale, ha->addr,
+ port_mask, 0, 0, 0);
+ }
+ }
+}
+
+static void am65_cpsw_nuss_ndo_host_tx_timeout(struct net_device *ndev,
+ unsigned int txqueue)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned long trans_start;
+
+ netif_txq = netdev_get_tx_queue(ndev, txqueue);
+ tx_chn = &common->tx_chns[txqueue];
+ trans_start = READ_ONCE(netif_txq->trans_start);
+
+ netdev_err(ndev, "txq:%d DRV_XOFF:%d tmo:%u dql_avail:%d free_desc:%zu\n",
+ txqueue,
+ netif_tx_queue_stopped(netif_txq),
+ jiffies_to_msecs(jiffies - trans_start),
+ dql_avail(&netif_txq->dql),
+ k3_cppi_desc_pool_avail(tx_chn->desc_pool));
+
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* try recover if stopped by us */
+ txq_trans_update(netif_txq);
+ netif_tx_wake_queue(netif_txq);
+ }
+}
+
+static int am65_cpsw_nuss_rx_push(struct am65_cpsw_common *common,
+ struct sk_buff *skb)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct cppi5_host_desc_t *desc_rx;
+ struct device *dev = common->dev;
+ u32 pkt_len = skb_tailroom(skb);
+ dma_addr_t desc_dma;
+ dma_addr_t buf_dma;
+ void *swdata;
+
+ desc_rx = k3_cppi_desc_pool_alloc(rx_chn->desc_pool);
+ if (!desc_rx) {
+ dev_err(dev, "Failed to allocate RXFDQ descriptor\n");
+ return -ENOMEM;
+ }
+ desc_dma = k3_cppi_desc_pool_virt2dma(rx_chn->desc_pool, desc_rx);
+
+ buf_dma = dma_map_single(rx_chn->dma_dev, skb->data, pkt_len,
+ DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(rx_chn->dma_dev, buf_dma))) {
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+ dev_err(dev, "Failed to map rx skb buffer\n");
+ return -EINVAL;
+ }
+
+ cppi5_hdesc_init(desc_rx, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ k3_udma_glue_rx_dma_to_cppi5_addr(rx_chn->rx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(desc_rx, buf_dma, skb_tailroom(skb), buf_dma, skb_tailroom(skb));
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ *((void **)swdata) = skb;
+
+ return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma);
+}
+
+void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ u32 val, pri_map;
+
+ /* P0 set Receive Priority Type */
+ val = readl(host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ val |= AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
+ /* Enet port FIFOs work in fixed priority mode only, so
+ * reset P0_Rx_Pri_Map so that all packets go to Enet FIFO 0
+ */
+ pri_map = 0x0;
+ } else {
+ val &= ~AM65_CPSW_PORT_REG_PRI_CTL_RX_PTYPE_RROBIN;
+ /* restore P0_Rx_Pri_Map */
+ pri_map = 0x76543210;
+ }
+
+ writel(pri_map, host_p->port_base + AM65_CPSW_PORT_REG_RX_PRI_MAP);
+ writel(val, host_p->port_base + AM65_CPSW_PORT_REG_PRI_CTL);
+}
+
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common);
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common);
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port);
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port);
+
+static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+ int port_idx, i, ret;
+ struct sk_buff *skb;
+ u32 val, port_mask;
+
+ if (common->usage_count)
+ return 0;
+
+ /* Control register */
+ writel(AM65_CPSW_CTL_P0_ENABLE | AM65_CPSW_CTL_P0_TX_CRC_REMOVE |
+ AM65_CPSW_CTL_VLAN_AWARE | AM65_CPSW_CTL_P0_RX_PAD,
+ common->cpsw_base + AM65_CPSW_REG_CTL);
+ /* Max length register */
+ writel(AM65_CPSW_MAX_PACKET_SIZE,
+ host_p->port_base + AM65_CPSW_PORT_REG_RX_MAXLEN);
+ /* set base flow_id */
+ writel(common->rx_flow_id_base,
+ host_p->port_base + AM65_CPSW_PORT0_REG_FLOW_ID_OFFSET);
+ /* en tx crc offload */
+ writel(AM65_CPSW_P0_REG_CTL_RX_CHECKSUM_EN, host_p->port_base + AM65_CPSW_P0_REG_CTL);
+
+ am65_cpsw_nuss_set_p0_ptype(common);
+
+ /* enable statistics */
+ val = BIT(HOST_PORT_NUM);
+ for (port_idx = 0; port_idx < common->port_num; port_idx++) {
+ struct am65_cpsw_port *port = &common->ports[port_idx];
+
+ if (!port->disabled)
+ val |= BIT(port->port_id);
+ }
+ writel(val, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+
+ /* disable priority elevation */
+ writel(0, common->cpsw_base + AM65_CPSW_REG_PTYPE);
+
+ cpsw_ale_start(common->ale);
+
+ /* limit to one RX flow only */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_DEFAULT_THREAD_ID, 0);
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_DEFAULT_THREAD_ENABLE, 1);
+ /* switch to vlan unaware mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_VLAN_AWARE, 1);
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ /* default vlan cfg: create mask based on enabled ports */
+ port_mask = GENMASK(common->port_num, 0) &
+ ~common->disabled_ports_mask;
+
+ cpsw_ale_add_vlan(common->ale, 0, port_mask,
+ port_mask, port_mask,
+ port_mask & ~ALE_PORT_HOST);
+
+ if (common->is_emac_mode)
+ am65_cpsw_init_host_port_emac(common);
+ else
+ am65_cpsw_init_host_port_switch(common);
+
+ for (i = 0; i < common->rx_chns.descs_num; i++) {
+ skb = __netdev_alloc_skb_ip_align(NULL,
+ AM65_CPSW_MAX_PACKET_SIZE,
+ GFP_KERNEL);
+ if (!skb) {
+ dev_err(common->dev, "cannot allocate skb\n");
+ return -ENOMEM;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, skb);
+ if (ret < 0) {
+ dev_err(common->dev,
+ "cannot submit skb to channel rx, error %d\n",
+ ret);
+ kfree_skb(skb);
+ return ret;
+ }
+ kmemleak_not_leak(skb);
+ }
+ k3_udma_glue_enable_rx_chn(common->rx_chns.rx_chn);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ ret = k3_udma_glue_enable_tx_chn(common->tx_chns[i].tx_chn);
+ if (ret)
+ return ret;
+ napi_enable(&common->tx_chns[i].napi_tx);
+ }
+
+ napi_enable(&common->napi_rx);
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ enable_irq(common->rx_chns.irq);
+ }
+
+ dev_dbg(common->dev, "cpsw_nuss started\n");
+ return 0;
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma);
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma);
+
+static int am65_cpsw_nuss_common_stop(struct am65_cpsw_common *common)
+{
+ int i;
+
+ if (common->usage_count != 1)
+ return 0;
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ /* shutdown tx channels */
+ atomic_set(&common->tdown_cnt, common->tx_ch_num);
+ /* ensure new tdown_cnt value is visible */
+ smp_mb__after_atomic();
+ reinit_completion(&common->tdown_complete);
+
+ for (i = 0; i < common->tx_ch_num; i++)
+ k3_udma_glue_tdown_tx_chn(common->tx_chns[i].tx_chn, false);
+
+ i = wait_for_completion_timeout(&common->tdown_complete,
+ msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "tx timeout\n");
+ for (i = 0; i < common->tx_ch_num; i++)
+ napi_disable(&common->tx_chns[i].napi_tx);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ k3_udma_glue_reset_tx_chn(common->tx_chns[i].tx_chn,
+ &common->tx_chns[i],
+ am65_cpsw_nuss_tx_cleanup);
+ k3_udma_glue_disable_tx_chn(common->tx_chns[i].tx_chn);
+ }
+
+ reinit_completion(&common->tdown_complete);
+ k3_udma_glue_tdown_rx_chn(common->rx_chns.rx_chn, true);
+
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ) {
+ i = wait_for_completion_timeout(&common->tdown_complete, msecs_to_jiffies(1000));
+ if (!i)
+ dev_err(common->dev, "rx teardown timeout\n");
+ }
+
+ napi_disable(&common->napi_rx);
+
+ for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+ k3_udma_glue_reset_rx_chn(common->rx_chns.rx_chn, i,
+ &common->rx_chns,
+ am65_cpsw_nuss_rx_cleanup, !!i);
+
+ k3_udma_glue_disable_rx_chn(common->rx_chns.rx_chn);
+
+ cpsw_ale_stop(common->ale);
+
+ writel(0, common->cpsw_base + AM65_CPSW_REG_CTL);
+ writel(0, common->cpsw_base + AM65_CPSW_REG_STAT_PORT_EN);
+
+ dev_dbg(common->dev, "cpsw_nuss stopped\n");
+ return 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ phylink_stop(port->slave.phylink);
+
+ netif_tx_stop_all_queues(ndev);
+
+ phylink_disconnect_phy(port->slave.phylink);
+
+ ret = am65_cpsw_nuss_common_stop(common);
+ if (ret)
+ return ret;
+
+ common->usage_count--;
+ pm_runtime_put(common->dev);
+ return 0;
+}
+
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct am65_cpsw_port *port = arg;
+
+ if (!vdev)
+ return 0;
+
+ return am65_cpsw_nuss_ndo_slave_add_vid(port->ndev, 0, vid);
+}
+
+static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret, i;
+
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, common->tx_ch_num);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of tx queues\n");
+ goto runtime_put;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, AM65_CPSW_MAX_RX_QUEUES);
+ if (ret) {
+ dev_err(common->dev, "cannot set real number of rx queues\n");
+ goto runtime_put;
+ }
+
+ for (i = 0; i < common->tx_ch_num; i++)
+ netdev_tx_reset_queue(netdev_get_tx_queue(ndev, i));
+
+ ret = am65_cpsw_nuss_common_open(common);
+ if (ret)
+ goto runtime_put;
+
+ common->usage_count++;
+
+ am65_cpsw_port_set_sl_mac(port, ndev->dev_addr);
+
+ if (common->is_emac_mode)
+ am65_cpsw_init_port_emac_ale(port);
+ else
+ am65_cpsw_init_port_switch_ale(port);
+
+ /* mac_sl should be configured via the phylink interface */
+ am65_cpsw_sl_ctl_reset(port);
+
+ ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0);
+ if (ret)
+ goto error_cleanup;
+
+ /* restore vlan configurations */
+ vlan_for_each(ndev, cpsw_restore_vlans, port);
+
+ phylink_start(port->slave.phylink);
+
+ return 0;
+
+error_cleanup:
+ am65_cpsw_nuss_ndo_slave_stop(ndev);
+ return ret;
+
+runtime_put:
+ pm_runtime_put(common->dev);
+ return ret;
+}
+
+static void am65_cpsw_nuss_rx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_rx_chn *rx_chn = data;
+ struct cppi5_host_desc_t *desc_rx;
+ struct sk_buff *skb;
+ dma_addr_t buf_dma;
+ u32 buf_dma_len;
+ void **swdata;
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static void am65_cpsw_nuss_rx_ts(struct sk_buff *skb, u32 *psdata)
+{
+ struct skb_shared_hwtstamps *ssh;
+ u64 ns;
+
+ ns = ((u64)psdata[1] << 32) | psdata[0];
+
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+
+/* RX psdata[2] word format - checksum information */
+#define AM65_CPSW_RX_PSD_CSUM_ADD GENMASK(15, 0)
+#define AM65_CPSW_RX_PSD_CSUM_ERR BIT(16)
+#define AM65_CPSW_RX_PSD_IS_FRAGMENT BIT(17)
+#define AM65_CPSW_RX_PSD_IS_TCP BIT(18)
+#define AM65_CPSW_RX_PSD_IPV6_VALID BIT(19)
+#define AM65_CPSW_RX_PSD_IPV4_VALID BIT(20)
+
+static void am65_cpsw_nuss_rx_csum(struct sk_buff *skb, u32 csum_info)
+{
+ /* HW can verify IPv4/IPv6 TCP/UDP packet checksums.
+ * The csum information is provided in the psdata[2] word:
+ * AM65_CPSW_RX_PSD_CSUM_ERR bit - indicates a csum error
+ * AM65_CPSW_RX_PSD_IPV6_VALID and AM65_CPSW_RX_PSD_IPV4_VALID
+ * bits - indicate an IPv4/IPv6 packet
+ * AM65_CPSW_RX_PSD_IS_FRAGMENT bit - indicates a fragmented packet
+ * AM65_CPSW_RX_PSD_CSUM_ADD holds 0xFFFF for non-fragmented packets,
+ * or the csum value for fragmented packets if !AM65_CPSW_RX_PSD_CSUM_ERR
+ */
+ skb_checksum_none_assert(skb);
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM)))
+ return;
+
+ if ((csum_info & (AM65_CPSW_RX_PSD_IPV6_VALID |
+ AM65_CPSW_RX_PSD_IPV4_VALID)) &&
+ !(csum_info & AM65_CPSW_RX_PSD_CSUM_ERR)) {
+ /* csum for fragmented packets is unsupported */
+ if (!(csum_info & AM65_CPSW_RX_PSD_IS_FRAGMENT))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+}
+
+static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_common *common,
+ u32 flow_idx)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ u32 buf_dma_len, pkt_len, port_id = 0, csum_info;
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_rx;
+ struct device *dev = common->dev;
+ struct sk_buff *skb, *new_skb;
+ dma_addr_t desc_dma, buf_dma;
+ struct am65_cpsw_port *port;
+ struct net_device *ndev;
+ void **swdata;
+ u32 *psdata;
+ int ret = 0;
+
+ ret = k3_udma_glue_pop_rx_chn(rx_chn->rx_chn, flow_idx, &desc_dma);
+ if (ret) {
+ if (ret != -ENODATA)
+ dev_err(dev, "RX: pop chn fail %d\n", ret);
+ return ret;
+ }
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ dev_dbg(dev, "%s RX tdown flow: %u\n", __func__, flow_idx);
+ if (common->pdata.quirks & AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ)
+ complete(&common->tdown_complete);
+ return 0;
+ }
+
+ desc_rx = k3_cppi_desc_pool_dma2virt(rx_chn->desc_pool, desc_dma);
+ dev_dbg(dev, "%s flow_idx: %u desc %pad\n",
+ __func__, flow_idx, &desc_dma);
+
+ swdata = cppi5_hdesc_get_swdata(desc_rx);
+ skb = *swdata;
+ cppi5_hdesc_get_obuf(desc_rx, &buf_dma, &buf_dma_len);
+ k3_udma_glue_rx_cppi5_to_dma_addr(rx_chn->rx_chn, &buf_dma);
+ pkt_len = cppi5_hdesc_get_pktlen(desc_rx);
+ cppi5_desc_get_tags_ids(&desc_rx->hdr, &port_id, NULL);
+ dev_dbg(dev, "%s rx port_id:%d\n", __func__, port_id);
+ port = am65_common_get_port(common, port_id);
+ ndev = port->ndev;
+ skb->dev = ndev;
+
+ psdata = cppi5_hdesc_get_psdata(desc_rx);
+ /* add RX timestamp */
+ if (port->rx_ts_enabled)
+ am65_cpsw_nuss_rx_ts(skb, psdata);
+ csum_info = psdata[2];
+ dev_dbg(dev, "%s rx csum_info:%#x\n", __func__, csum_info);
+
+ dma_unmap_single(rx_chn->dma_dev, buf_dma, buf_dma_len, DMA_FROM_DEVICE);
+
+ k3_cppi_desc_pool_free(rx_chn->desc_pool, desc_rx);
+
+ new_skb = netdev_alloc_skb_ip_align(ndev, AM65_CPSW_MAX_PACKET_SIZE);
+ if (new_skb) {
+ ndev_priv = netdev_priv(ndev);
+ am65_cpsw_nuss_set_offload_fwd_mark(skb, ndev_priv->offload_fwd_mark);
+ skb_put(skb, pkt_len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ am65_cpsw_nuss_rx_csum(skb, csum_info);
+ napi_gro_receive(&common->napi_rx, skb);
+
+ stats = this_cpu_ptr(ndev_priv->stats);
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_packets++;
+ stats->rx_bytes += pkt_len;
+ u64_stats_update_end(&stats->syncp);
+ kmemleak_not_leak(new_skb);
+ } else {
+ ndev->stats.rx_dropped++;
+ new_skb = skb;
+ }
+
+ if (netif_dormant(ndev)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_dropped++;
+ return 0;
+ }
+
+ ret = am65_cpsw_nuss_rx_push(common, new_skb);
+ if (WARN_ON(ret < 0)) {
+ dev_kfree_skb_any(new_skb);
+ ndev->stats.rx_errors++;
+ ndev->stats.rx_dropped++;
+ }
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct am65_cpsw_common *common = am65_cpsw_napi_to_common(napi_rx);
+ int flow = AM65_CPSW_MAX_RX_FLOWS;
+ int cur_budget, ret;
+ int num_rx = 0;
+
+ /* process every flow */
+ while (flow--) {
+ cur_budget = budget - num_rx;
+
+ while (cur_budget--) {
+ ret = am65_cpsw_nuss_rx_packets(common, flow);
+ if (ret)
+ break;
+ num_rx++;
+ }
+
+ if (num_rx >= budget)
+ break;
+ }
+
+ dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget);
+
+ if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) {
+ if (common->rx_irq_disabled) {
+ common->rx_irq_disabled = false;
+ enable_irq(common->rx_chns.irq);
+ }
+ }
+
+ return num_rx;
+}
+
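+/* Unmap and free a (possibly chained) TX host descriptor: the first
+ * descriptor carries the linear buffer (dma_unmap_single), linked
+ * descriptors carry page fragments (dma_unmap_page).
+ */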
+static void am65_cpsw_nuss_xmit_free(struct am65_cpsw_tx_chn *tx_chn,
+ struct cppi5_host_desc_t *desc)
+{
+ struct cppi5_host_desc_t *first_desc, *next_desc;
+ dma_addr_t buf_dma, next_desc_dma;
+ u32 buf_dma_len;
+
+ first_desc = desc;
+ next_desc = first_desc;
+
+ cppi5_hdesc_get_obuf(first_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, buf_dma_len, DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(first_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+ while (next_desc_dma) {
+ next_desc = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ next_desc_dma);
+ cppi5_hdesc_get_obuf(next_desc, &buf_dma, &buf_dma_len);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &buf_dma);
+
+ dma_unmap_page(tx_chn->dma_dev, buf_dma, buf_dma_len,
+ DMA_TO_DEVICE);
+
+ next_desc_dma = cppi5_hdesc_get_next_hbdesc(next_desc);
+ k3_udma_glue_tx_cppi5_to_dma_addr(tx_chn->tx_chn, &next_desc_dma);
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ }
+
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc);
+}
+
+static void am65_cpsw_nuss_tx_cleanup(void *data, dma_addr_t desc_dma)
+{
+ struct am65_cpsw_tx_chn *tx_chn = data;
+ struct cppi5_host_desc_t *desc_tx;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool, desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ dev_kfree_skb_any(skb);
+}
+
+static struct sk_buff *
+am65_cpsw_nuss_tx_compl_packet(struct am65_cpsw_tx_chn *tx_chn,
+ dma_addr_t desc_dma)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct am65_cpsw_ndev_stats *stats;
+ struct cppi5_host_desc_t *desc_tx;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ void **swdata;
+
+ desc_tx = k3_cppi_desc_pool_dma2virt(tx_chn->desc_pool,
+ desc_dma);
+ swdata = cppi5_hdesc_get_swdata(desc_tx);
+ skb = *(swdata);
+ am65_cpsw_nuss_xmit_free(tx_chn, desc_tx);
+
+ ndev = skb->dev;
+
+ am65_cpts_tx_timestamp(tx_chn->common->cpts, skb);
+
+ ndev_priv = netdev_priv(ndev);
+ stats = this_cpu_ptr(ndev_priv->stats);
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_packets++;
+ stats->tx_bytes += skb->len;
+ u64_stats_update_end(&stats->syncp);
+
+ return skb;
+}
+
+static void am65_cpsw_nuss_tx_wake(struct am65_cpsw_tx_chn *tx_chn, struct net_device *ndev,
+ struct netdev_queue *netif_txq)
+{
+ if (netif_tx_queue_stopped(netif_txq)) {
+ /* The queue may have been stopped because TX DMA ran out of
+ * free descriptors; now that descriptors have been freed,
+ * wake the queue again.
+ */
+ __netif_tx_lock(netif_txq, smp_processor_id());
+ if (netif_running(ndev) &&
+ (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >= MAX_SKB_FRAGS))
+ netif_tx_wake_queue(netif_txq);
+
+ __netif_tx_unlock(netif_txq);
+ }
+}
+
+static int am65_cpsw_nuss_tx_compl_packets(struct am65_cpsw_common *common,
+ int chn, unsigned int budget)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ spin_lock(&tx_chn->lock);
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ spin_unlock(&tx_chn->lock);
+ if (res == -ENODATA)
+ break;
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ break;
+ }
+
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+ total_bytes = skb->len;
+ ndev = skb->dev;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+ }
+
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
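+/* CPSW2G variant of TX completion: with only one external MAC port all
+ * completions belong to the same netdev, so no per-channel lock is taken
+ * and the BQL/queue-wake bookkeeping is done once per poll instead of per
+ * packet.
+ */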
+static int am65_cpsw_nuss_tx_compl_packets_2g(struct am65_cpsw_common *common,
+ int chn, unsigned int budget)
+{
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ unsigned int total_bytes = 0;
+ struct net_device *ndev;
+ struct sk_buff *skb;
+ dma_addr_t desc_dma;
+ int res, num_tx = 0;
+
+ tx_chn = &common->tx_chns[chn];
+
+ while (true) {
+ res = k3_udma_glue_pop_tx_chn(tx_chn->tx_chn, &desc_dma);
+ if (res == -ENODATA)
+ break;
+
+ if (cppi5_desc_is_tdcm(desc_dma)) {
+ if (atomic_dec_and_test(&common->tdown_cnt))
+ complete(&common->tdown_complete);
+ break;
+ }
+
+ skb = am65_cpsw_nuss_tx_compl_packet(tx_chn, desc_dma);
+
+ ndev = skb->dev;
+ total_bytes += skb->len;
+ napi_consume_skb(skb, budget);
+ num_tx++;
+ }
+
+ if (!num_tx)
+ return 0;
+
+ netif_txq = netdev_get_tx_queue(ndev, chn);
+
+ netdev_tx_completed_queue(netif_txq, num_tx, total_bytes);
+
+ am65_cpsw_nuss_tx_wake(tx_chn, ndev, netif_txq);
+
+ dev_dbg(dev, "%s:%u pkt:%d\n", __func__, chn, num_tx);
+
+ return num_tx;
+}
+
+static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct am65_cpsw_tx_chn *tx_chn = am65_cpsw_napi_to_tx_chn(napi_tx);
+ int num_tx;
+
+ if (AM65_CPSW_IS_CPSW2G(tx_chn->common))
+ num_tx = am65_cpsw_nuss_tx_compl_packets_2g(tx_chn->common, tx_chn->id, budget);
+ else
+ num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget);
+
+ if (num_tx >= budget)
+ return budget;
+
+ if (napi_complete_done(napi_tx, num_tx))
+ enable_irq(tx_chn->irq);
+
+ return 0;
+}
+
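+/* The hard IRQ handlers only mask the interrupt and schedule NAPI; the IRQ
+ * is re-enabled from the corresponding poll function once it completes
+ * below budget.
+ */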
+static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id)
+{
+ struct am65_cpsw_common *common = dev_id;
+
+ common->rx_irq_disabled = true;
+ disable_irq_nosync(irq);
+ napi_schedule(&common->napi_rx);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t am65_cpsw_nuss_tx_irq(int irq, void *dev_id)
+{
+ struct am65_cpsw_tx_chn *tx_chn = dev_id;
+
+ disable_irq_nosync(irq);
+ napi_schedule(&tx_chn->napi_tx);
+
+ return IRQ_HANDLED;
+}
+
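+/* Transmit path: map the linear part of the skb, chain CPPI5 host
+ * descriptors for any page fragments, encode the checksum offload hint in
+ * psdata[2] and push the chain to the TX channel. BQL accounting and
+ * queue stop/wake flow control are handled here as well.
+ */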
+static netdev_tx_t am65_cpsw_nuss_ndo_slave_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct device *dev = common->dev;
+ struct am65_cpsw_tx_chn *tx_chn;
+ struct netdev_queue *netif_txq;
+ dma_addr_t desc_dma, buf_dma;
+ int ret, q_idx, i;
+ void **swdata;
+ u32 *psdata;
+ u32 pkt_len;
+
+ /* short frame padding is enabled in HW, no SW padding needed */
+ pkt_len = skb_headlen(skb);
+
+ /* SKB TX timestamp */
+ if (port->tx_ts_enabled)
+ am65_cpts_prep_tx_timestamp(common->cpts, skb);
+
+ q_idx = skb_get_queue_mapping(skb);
+ dev_dbg(dev, "%s skb_queue:%d\n", __func__, q_idx);
+
+ tx_chn = &common->tx_chns[q_idx];
+ netif_txq = netdev_get_tx_queue(ndev, q_idx);
+
+ /* Map the linear buffer */
+ buf_dma = dma_map_single(tx_chn->dma_dev, skb->data, pkt_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb buffer\n");
+ ndev->stats.tx_errors++;
+ goto err_free_skb;
+ }
+
+ first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!first_desc) {
+ dev_dbg(dev, "Failed to allocate descriptor\n");
+ dma_unmap_single(tx_chn->dma_dev, buf_dma, pkt_len,
+ DMA_TO_DEVICE);
+ goto busy_stop_q;
+ }
+
+ cppi5_hdesc_init(first_desc, CPPI5_INFO0_HDESC_EPIB_PRESENT,
+ AM65_CPSW_NAV_PS_DATA_SIZE);
+ cppi5_desc_set_pktids(&first_desc->hdr, 0, 0x3FFF);
+ cppi5_hdesc_set_pkttype(first_desc, 0x7);
+ cppi5_desc_set_tags_ids(&first_desc->hdr, 0, port->port_id);
+
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(first_desc, buf_dma, pkt_len, buf_dma, pkt_len);
+ swdata = cppi5_hdesc_get_swdata(first_desc);
+ *(swdata) = skb;
+ psdata = cppi5_hdesc_get_psdata(first_desc);
+
+ /* HW csum offload if enabled */
+ psdata[2] = 0;
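+ /* psdata[2] packs (checksum field offset + 1) in bits 31:24,
+ * (checksum start + 1) in bits 23:16 and the number of bytes to
+ * checksum in bits 15:0. For example, an untagged TCP/IPv4 frame
+ * with a 20-byte IP header gives cs_start = 34 and cs_offset = 50.
+ */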
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+ unsigned int cs_start, cs_offset;
+
+ cs_start = skb_transport_offset(skb);
+ cs_offset = cs_start + skb->csum_offset;
+ /* HW counts bytes starting from 1 */
+ psdata[2] = ((cs_offset + 1) << 24) |
+ ((cs_start + 1) << 16) | (skb->len - cs_start);
+ dev_dbg(dev, "%s tx psdata:%#x\n", __func__, psdata[2]);
+ }
+
+ if (!skb_is_nonlinear(skb))
+ goto done_tx;
+
+ dev_dbg(dev, "fragmented SKB\n");
+
+ /* Handle the case where skb is fragmented in pages */
+ cur_desc = first_desc;
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ u32 frag_size = skb_frag_size(frag);
+
+ next_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
+ if (!next_desc) {
+ dev_err(dev, "Failed to allocate descriptor\n");
+ goto busy_free_descs;
+ }
+
+ buf_dma = skb_frag_dma_map(tx_chn->dma_dev, frag, 0, frag_size,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(tx_chn->dma_dev, buf_dma))) {
+ dev_err(dev, "Failed to map tx skb page\n");
+ k3_cppi_desc_pool_free(tx_chn->desc_pool, next_desc);
+ ndev->stats.tx_errors++;
+ goto err_free_descs;
+ }
+
+ cppi5_hdesc_reset_hbdesc(next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &buf_dma);
+ cppi5_hdesc_attach_buf(next_desc,
+ buf_dma, frag_size, buf_dma, frag_size);
+
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool,
+ next_desc);
+ k3_udma_glue_tx_dma_to_cppi5_addr(tx_chn->tx_chn, &desc_dma);
+ cppi5_hdesc_link_hbdesc(cur_desc, desc_dma);
+
+ pkt_len += frag_size;
+ cur_desc = next_desc;
+ }
+ WARN_ON(pkt_len != skb->len);
+
+done_tx:
+ skb_tx_timestamp(skb);
+
+ /* report bql before sending packet */
+ netdev_tx_sent_queue(netif_txq, pkt_len);
+
+ cppi5_hdesc_set_pktlen(first_desc, pkt_len);
+ desc_dma = k3_cppi_desc_pool_virt2dma(tx_chn->desc_pool, first_desc);
+ if (AM65_CPSW_IS_CPSW2G(common)) {
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ } else {
+ spin_lock_bh(&tx_chn->lock);
+ ret = k3_udma_glue_push_tx_chn(tx_chn->tx_chn, first_desc, desc_dma);
+ spin_unlock_bh(&tx_chn->lock);
+ }
+ if (ret) {
+ dev_err(dev, "can't push desc %d\n", ret);
+ /* inform bql */
+ netdev_tx_completed_queue(netif_txq, 1, pkt_len);
+ ndev->stats.tx_errors++;
+ goto err_free_descs;
+ }
+
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) < MAX_SKB_FRAGS) {
+ netif_tx_stop_queue(netif_txq);
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
+ dev_dbg(dev, "netif_tx_stop_queue %d\n", q_idx);
+
+ /* re-check for smp */
+ if (k3_cppi_desc_pool_avail(tx_chn->desc_pool) >=
+ MAX_SKB_FRAGS) {
+ netif_tx_wake_queue(netif_txq);
+ dev_dbg(dev, "netif_tx_wake_queue %d\n", q_idx);
+ }
+ }
+
+ return NETDEV_TX_OK;
+
+err_free_descs:
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
+err_free_skb:
+ ndev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+
+busy_free_descs:
+ am65_cpsw_nuss_xmit_free(tx_chn, first_desc);
+busy_stop_q:
+ netif_tx_stop_queue(netif_txq);
+ return NETDEV_TX_BUSY;
+}
+
+static int am65_cpsw_nuss_ndo_slave_set_mac_address(struct net_device *ndev,
+ void *addr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct sockaddr *sockaddr = (struct sockaddr *)addr;
+ int ret;
+
+ ret = eth_prepare_mac_addr_change(ndev, addr);
+ if (ret < 0)
+ return ret;
+
+ ret = pm_runtime_resume_and_get(common->dev);
+ if (ret < 0)
+ return ret;
+
+ cpsw_ale_del_ucast(common->ale, ndev->dev_addr,
+ HOST_PORT_NUM, 0, 0);
+ cpsw_ale_add_ucast(common->ale, sockaddr->sa_data,
+ HOST_PORT_NUM, ALE_SECURE, 0);
+
+ am65_cpsw_port_set_sl_mac(port, addr);
+ eth_commit_mac_addr_change(ndev, sockaddr);
+
+ pm_runtime_put(common->dev);
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_hwtstamp_set(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 ts_ctrl, seq_id, ts_ctrl_ltype2, ts_vlan_ltype;
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ port->rx_ts_enabled = false;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ port->rx_ts_enabled = true;
+ cfg.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ port->tx_ts_enabled = (cfg.tx_type == HWTSTAMP_TX_ON);
+
+ /* cfg TX timestamp */
+ seq_id = (AM65_CPSW_TS_SEQ_ID_OFFSET <<
+ AM65_CPSW_PN_TS_SEQ_ID_OFFSET_SHIFT) | ETH_P_1588;
+
+ ts_vlan_ltype = ETH_P_8021Q;
+
+ ts_ctrl_ltype2 = ETH_P_1588 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_107 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_129 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_130 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_131 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_132 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_319 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_320 |
+ AM65_CPSW_PN_TS_CTL_LTYPE2_TS_TTL_NONZERO;
+
+ ts_ctrl = AM65_CPSW_TS_EVENT_MSG_TYPE_BITS <<
+ AM65_CPSW_PN_TS_CTL_MSG_TYPE_EN_SHIFT;
+
+ if (port->tx_ts_enabled)
+ ts_ctrl |= AM65_CPSW_TS_TX_ANX_ALL_EN |
+ AM65_CPSW_PN_TS_CTL_TX_VLAN_LT1_EN;
+
+ writel(seq_id, port->port_base + AM65_CPSW_PORTN_REG_TS_SEQ_LTYPE_REG);
+ writel(ts_vlan_ltype, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_VLAN_LTYPE_REG);
+ writel(ts_ctrl_ltype2, port->port_base +
+ AM65_CPSW_PORTN_REG_TS_CTL_LTYPE2);
+ writel(ts_ctrl, port->port_base + AM65_CPSW_PORTN_REG_TS_CTL);
+
+ /* en/dis RX timestamp */
+ am65_cpts_rx_enable(common->cpts, port->rx_ts_enabled);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int am65_cpsw_nuss_hwtstamp_get(struct net_device *ndev,
+ struct ifreq *ifr)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct hwtstamp_config cfg;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = port->tx_ts_enabled ?
+ HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = port->rx_ts_enabled ?
+ HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int am65_cpsw_nuss_ndo_slave_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_set(ndev, req);
+ case SIOCGHWTSTAMP:
+ return am65_cpsw_nuss_hwtstamp_get(ndev, req);
+ }
+
+ return phylink_mii_ioctl(port->slave.phylink, req, cmd);
+}
+
+static void am65_cpsw_nuss_ndo_get_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv = netdev_priv(dev);
+ unsigned int start;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ struct am65_cpsw_ndev_stats *cpu_stats;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+
+ cpu_stats = per_cpu_ptr(ndev_priv->stats, cpu);
+ do {
+ start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+ rx_packets = cpu_stats->rx_packets;
+ rx_bytes = cpu_stats->rx_bytes;
+ tx_packets = cpu_stats->tx_packets;
+ tx_bytes = cpu_stats->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+ stats->rx_packets += rx_packets;
+ stats->rx_bytes += rx_bytes;
+ stats->tx_packets += tx_packets;
+ stats->tx_bytes += tx_bytes;
+ }
+
+ stats->rx_errors = dev->stats.rx_errors;
+ stats->rx_dropped = dev->stats.rx_dropped;
+ stats->tx_dropped = dev->stats.tx_dropped;
+}
+
+static struct devlink_port *am65_cpsw_ndo_get_devlink_port(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return &port->devlink_port;
+}
+
+static const struct net_device_ops am65_cpsw_nuss_netdev_ops = {
+ .ndo_open = am65_cpsw_nuss_ndo_slave_open,
+ .ndo_stop = am65_cpsw_nuss_ndo_slave_stop,
+ .ndo_start_xmit = am65_cpsw_nuss_ndo_slave_xmit,
+ .ndo_set_rx_mode = am65_cpsw_nuss_ndo_slave_set_rx_mode,
+ .ndo_get_stats64 = am65_cpsw_nuss_ndo_get_stats,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = am65_cpsw_nuss_ndo_slave_set_mac_address,
+ .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout,
+ .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid,
+ .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid,
+ .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl,
+ .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc,
+ .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port,
+};
+
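+/* phylink MAC callbacks. mac_config only needs to touch the MAC for the
+ * extra interface modes (e.g. QSGMII); speed, duplex and pause are applied
+ * in mac_link_up and the port is quiesced in mac_link_down.
+ */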
+static void am65_cpsw_nuss_mac_config(struct phylink_config *config, unsigned int mode,
+ const struct phylink_link_state *state)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+
+ if (common->pdata.extra_modes & BIT(state->interface))
+ writel(AM65_CPSW_SGMII_CONTROL_MR_AN_ENABLE,
+ port->sgmii_base + AM65_CPSW_SGMII_CONTROL_REG);
+}
+
+static void am65_cpsw_nuss_mac_link_down(struct phylink_config *config, unsigned int mode,
+ phy_interface_t interface)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ struct net_device *ndev = port->ndev;
+ int tmo;
+
+ /* disable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, CPSW_SL_CTL_CMD_IDLE);
+
+ tmo = cpsw_sl_wait_for_idle(port->slave.mac_sl, 100);
+ dev_dbg(common->dev, "down msc_sl %08x tmo %d\n",
+ cpsw_sl_reg_read(port->slave.mac_sl, CPSW_SL_MACSTATUS), tmo);
+
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+
+ am65_cpsw_qos_link_down(ndev);
+ netif_tx_stop_all_queues(ndev);
+}
+
+static void am65_cpsw_nuss_mac_link_up(struct phylink_config *config, struct phy_device *phy,
+ unsigned int mode, phy_interface_t interface, int speed,
+ int duplex, bool tx_pause, bool rx_pause)
+{
+ struct am65_cpsw_slave_data *slave = container_of(config, struct am65_cpsw_slave_data,
+ phylink_config);
+ struct am65_cpsw_port *port = container_of(slave, struct am65_cpsw_port, slave);
+ struct am65_cpsw_common *common = port->common;
+ u32 mac_control = CPSW_SL_CTL_GMII_EN;
+ struct net_device *ndev = port->ndev;
+
+ if (speed == SPEED_1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (speed == SPEED_10 && phy_interface_mode_is_rgmii(interface))
+ /* Can be used with in-band mode only */
+ mac_control |= CPSW_SL_CTL_EXT_EN;
+ if (speed == SPEED_100 && interface == PHY_INTERFACE_MODE_RMII)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ if (duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+ /* rx_pause/tx_pause */
+ if (rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ cpsw_sl_ctl_set(port->slave.mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ am65_cpsw_qos_link_up(ndev, speed);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static const struct phylink_mac_ops am65_cpsw_phylink_mac_ops = {
+ .validate = phylink_generic_validate,
+ .mac_config = am65_cpsw_nuss_mac_config,
+ .mac_link_down = am65_cpsw_nuss_mac_link_down,
+ .mac_link_up = am65_cpsw_nuss_mac_link_up,
+};
+
+static void am65_cpsw_nuss_slave_disable_unused(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *common = port->common;
+
+ if (!port->disabled)
+ return;
+
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_reset(port->slave.mac_sl, 100);
+ cpsw_sl_ctl_reset(port->slave.mac_sl);
+}
+
+static void am65_cpsw_nuss_free_tx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ int i;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
+void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ int i;
+
+ devm_remove_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ if (tx_chn->irq)
+ devm_free_irq(dev, tx_chn->irq, tx_chn);
+
+ netif_napi_del(&tx_chn->napi_tx);
+
+ if (!IS_ERR_OR_NULL(tx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(tx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(tx_chn->tx_chn))
+ k3_udma_glue_release_tx_chn(tx_chn->tx_chn);
+
+ memset(tx_chn, 0, sizeof(*tx_chn));
+ }
+}
+
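+/* Request one "txN" UDMA TX channel per TX queue together with a
+ * per-channel descriptor pool and completion IRQ. Cleanup is attached as a
+ * devm action so the channels are released even on a failed probe.
+ */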
+static int am65_cpsw_nuss_init_tx_chns(struct am65_cpsw_common *common)
+{
+ u32 max_desc_num = ALIGN(AM65_CPSW_MAX_TX_DESC, MAX_SKB_FRAGS);
+ struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
+ struct device *dev = common->dev;
+ struct k3_ring_cfg ring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0
+ };
+ u32 hdesc_size;
+ int i, ret = 0;
+
+ hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
+ AM65_CPSW_NAV_SW_DATA_SIZE);
+
+ tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+ tx_cfg.tx_cfg = ring_cfg;
+ tx_cfg.txcq_cfg = ring_cfg;
+ tx_cfg.tx_cfg.size = max_desc_num;
+ tx_cfg.txcq_cfg.size = max_desc_num;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ snprintf(tx_chn->tx_chn_name,
+ sizeof(tx_chn->tx_chn_name), "tx%d", i);
+
+ spin_lock_init(&tx_chn->lock);
+ tx_chn->common = common;
+ tx_chn->id = i;
+ tx_chn->descs_num = max_desc_num;
+
+ tx_chn->tx_chn =
+ k3_udma_glue_request_tx_chn(dev,
+ tx_chn->tx_chn_name,
+ &tx_cfg);
+ if (IS_ERR(tx_chn->tx_chn)) {
+ ret = dev_err_probe(dev, PTR_ERR(tx_chn->tx_chn),
+ "Failed to request tx dma channel\n");
+ goto err;
+ }
+ tx_chn->dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn->tx_chn);
+
+ tx_chn->desc_pool = k3_cppi_desc_pool_create_name(tx_chn->dma_dev,
+ tx_chn->descs_num,
+ hdesc_size,
+ tx_chn->tx_chn_name);
+ if (IS_ERR(tx_chn->desc_pool)) {
+ ret = PTR_ERR(tx_chn->desc_pool);
+ dev_err(dev, "Failed to create tx descriptor pool %d\n", ret);
+ goto err;
+ }
+
+ tx_chn->irq = k3_udma_glue_tx_get_irq(tx_chn->tx_chn);
+ if (tx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get tx dma irq %d\n",
+ tx_chn->irq);
+ ret = tx_chn->irq ?: -ENXIO;
+ goto err;
+ }
+
+ snprintf(tx_chn->tx_chn_name,
+ sizeof(tx_chn->tx_chn_name), "%s-tx%d",
+ dev_name(dev), tx_chn->id);
+ }
+
+err:
+ i = devm_add_action(dev, am65_cpsw_nuss_free_tx_chns, common);
+ if (i) {
+ dev_err(dev, "Failed to add free_tx_chns action %d\n", i);
+ return i;
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_nuss_free_rx_chns(void *data)
+{
+ struct am65_cpsw_common *common = data;
+ struct am65_cpsw_rx_chn *rx_chn;
+
+ rx_chn = &common->rx_chns;
+
+ if (!IS_ERR_OR_NULL(rx_chn->desc_pool))
+ k3_cppi_desc_pool_destroy(rx_chn->desc_pool);
+
+ if (!IS_ERR_OR_NULL(rx_chn->rx_chn))
+ k3_udma_glue_release_rx_chn(rx_chn->rx_chn);
+}
+
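+/* Set up the RX channel and its flows: the free descriptor queue of flow 0
+ * is shared (K3_RINGACC_RING_SHARED) with any remaining flows, and the
+ * flow-id base actually assigned by the DMA glue layer is stored back in
+ * common->rx_flow_id_base.
+ */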
+static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_rx_chn *rx_chn = &common->rx_chns;
+ struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
+ u32 max_desc_num = AM65_CPSW_MAX_RX_DESC;
+ struct device *dev = common->dev;
+ u32 hdesc_size;
+ u32 fdqring_id;
+ int i, ret = 0;
+
+ hdesc_size = cppi5_hdesc_calc_size(true, AM65_CPSW_NAV_PS_DATA_SIZE,
+ AM65_CPSW_NAV_SW_DATA_SIZE);
+
+ rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+ rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+ rx_cfg.flow_id_base = common->rx_flow_id_base;
+
+ /* init all flows */
+ rx_chn->dev = dev;
+ rx_chn->descs_num = max_desc_num;
+
+ rx_chn->rx_chn = k3_udma_glue_request_rx_chn(dev, "rx", &rx_cfg);
+ if (IS_ERR(rx_chn->rx_chn)) {
+ ret = dev_err_probe(dev, PTR_ERR(rx_chn->rx_chn),
+ "Failed to request rx dma channel\n");
+ goto err;
+ }
+ rx_chn->dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn->rx_chn);
+
+ rx_chn->desc_pool = k3_cppi_desc_pool_create_name(rx_chn->dma_dev,
+ rx_chn->descs_num,
+ hdesc_size, "rx");
+ if (IS_ERR(rx_chn->desc_pool)) {
+ ret = PTR_ERR(rx_chn->desc_pool);
+ dev_err(dev, "Failed to create rx descriptor pool %d\n", ret);
+ goto err;
+ }
+
+ common->rx_flow_id_base =
+ k3_udma_glue_rx_get_flow_id_base(rx_chn->rx_chn);
+ dev_info(dev, "set new flow-id-base %u\n", common->rx_flow_id_base);
+
+ fdqring_id = K3_RINGACC_RING_ID_ANY;
+ for (i = 0; i < rx_cfg.flow_id_num; i++) {
+ struct k3_ring_cfg rxring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .mode = K3_RINGACC_RING_MODE_RING,
+ .flags = 0,
+ };
+ struct k3_ring_cfg fdqring_cfg = {
+ .elm_size = K3_RINGACC_RING_ELSIZE_8,
+ .flags = K3_RINGACC_RING_SHARED,
+ };
+ struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+ .rx_cfg = rxring_cfg,
+ .rxfdq_cfg = fdqring_cfg,
+ .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+ .src_tag_lo_sel =
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+ };
+
+ rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+ rx_flow_cfg.rx_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.size = max_desc_num;
+ rx_flow_cfg.rxfdq_cfg.mode = common->pdata.fdqring_mode;
+
+ ret = k3_udma_glue_rx_flow_init(rx_chn->rx_chn,
+ i, &rx_flow_cfg);
+ if (ret) {
+ dev_err(dev, "Failed to init rx flow%d %d\n", i, ret);
+ goto err;
+ }
+ if (!i)
+ fdqring_id =
+ k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn,
+ i);
+
+ rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i);
+
+ if (rx_chn->irq <= 0) {
+ dev_err(dev, "Failed to get rx dma irq %d\n",
+ rx_chn->irq);
+ ret = -ENXIO;
+ goto err;
+ }
+ }
+
+err:
+ i = devm_add_action(dev, am65_cpsw_nuss_free_rx_chns, common);
+ if (i) {
+ dev_err(dev, "Failed to add free_rx_chns action %d\n", i);
+ return i;
+ }
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_host_p(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host_p = am65_common_get_host(common);
+
+ host_p->common = common;
+ host_p->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE;
+ host_p->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE;
+
+ return 0;
+}
+
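+/* Read the factory MAC address of a port from the efuse region referenced
+ * by the "ti,syscon-efuse" property (syscon phandle plus register offset).
+ * Returns quietly if the property is absent so the caller can fall back to
+ * a random MAC address.
+ */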
+static int am65_cpsw_am654_get_efuse_macid(struct device_node *of_node,
+ int slave, u8 *mac_addr)
+{
+ u32 mac_lo, mac_hi, offset;
+ struct regmap *syscon;
+ int ret;
+
+ syscon = syscon_regmap_lookup_by_phandle(of_node, "ti,syscon-efuse");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ ret = of_property_read_u32_index(of_node, "ti,syscon-efuse", 1,
+ &offset);
+ if (ret)
+ return ret;
+
+ regmap_read(syscon, offset, &mac_lo);
+ regmap_read(syscon, offset + 4, &mac_hi);
+
+ mac_addr[0] = (mac_hi >> 8) & 0xff;
+ mac_addr[1] = mac_hi & 0xff;
+ mac_addr[2] = (mac_lo >> 24) & 0xff;
+ mac_addr[3] = (mac_lo >> 16) & 0xff;
+ mac_addr[4] = (mac_lo >> 8) & 0xff;
+ mac_addr[5] = mac_lo & 0xff;
+
+ return 0;
+}
+
+static int am65_cpsw_init_cpts(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct device_node *node;
+ struct am65_cpts *cpts;
+ void __iomem *reg_base;
+
+ if (!IS_ENABLED(CONFIG_TI_K3_AM65_CPTS))
+ return 0;
+
+ node = of_get_child_by_name(dev->of_node, "cpts");
+ if (!node) {
+ dev_err(dev, "%s cpts not found\n", __func__);
+ return -ENOENT;
+ }
+
+ reg_base = common->cpsw_base + AM65_CPSW_NU_CPTS_BASE;
+ cpts = am65_cpts_create(dev, reg_base, node);
+ if (IS_ERR(cpts)) {
+ int ret = PTR_ERR(cpts);
+
+ of_node_put(node);
+ if (ret == -EOPNOTSUPP) {
+ dev_info(dev, "cpts disabled\n");
+ return 0;
+ }
+
+ dev_err(dev, "cpts create err %d\n", ret);
+ return ret;
+ }
+ common->cpts = cpts;
+ /* Forbid PM runtime while CPTS is in use.
+ * Depending on integration, K3 CPSWxG modules may completely lose
+ * context during ON->OFF power transitions:
+ * AM65x/J721E MCU CPSW2G: no
+ * J721E MAIN_CPSW9G: yes
+ */
+ pm_runtime_forbid(dev);
+
+ return 0;
+}
+
+static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common)
+{
+ struct device_node *node, *port_np;
+ struct device *dev = common->dev;
+ int ret;
+
+ node = of_get_child_by_name(dev->of_node, "ethernet-ports");
+ if (!node)
+ return -ENOENT;
+
+ for_each_child_of_node(node, port_np) {
+ struct am65_cpsw_port *port;
+ u32 port_id;
+
+ /* it is not a slave port node, continue */
+ if (strcmp(port_np->name, "port"))
+ continue;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ goto of_node_put;
+ }
+
+ if (!port_id || port_id > common->port_num) {
+ dev_err(dev, "%pOF has invalid port_id %u %s\n",
+ port_np, port_id, port_np->name);
+ ret = -EINVAL;
+ goto of_node_put;
+ }
+
+ port = am65_common_get_port(common, port_id);
+ port->port_id = port_id;
+ port->common = common;
+ port->port_base = common->cpsw_base + AM65_CPSW_NU_PORTS_BASE +
+ AM65_CPSW_NU_PORTS_OFFSET * (port_id);
+ if (common->pdata.extra_modes)
+ port->sgmii_base = common->ss_base + AM65_CPSW_SGMII_BASE * (port_id);
+ port->stat_base = common->cpsw_base + AM65_CPSW_NU_STATS_BASE +
+ (AM65_CPSW_NU_STATS_PORT_OFFSET * port_id);
+ port->name = of_get_property(port_np, "label", NULL);
+ port->fetch_ram_base =
+ common->cpsw_base + AM65_CPSW_NU_FRAM_BASE +
+ (AM65_CPSW_NU_FRAM_PORT_OFFSET * (port_id - 1));
+
+ port->slave.mac_sl = cpsw_sl_get("am65", dev, port->port_base);
+ if (IS_ERR(port->slave.mac_sl)) {
+ ret = PTR_ERR(port->slave.mac_sl);
+ goto of_node_put;
+ }
+
+ port->disabled = !of_device_is_available(port_np);
+ if (port->disabled) {
+ common->disabled_ports_mask |= BIT(port->port_id);
+ continue;
+ }
+
+ port->slave.ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(port->slave.ifphy)) {
+ ret = PTR_ERR(port->slave.ifphy);
+ dev_err(dev, "%pOF error retrieving port phy: %d\n",
+ port_np, ret);
+ goto of_node_put;
+ }
+
+ port->slave.mac_only =
+ of_property_read_bool(port_np, "ti,mac-only");
+
+ /* get phy/link info */
+ port->slave.phy_node = port_np;
+ ret = of_get_phy_mode(port_np, &port->slave.phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ goto of_node_put;
+ }
+
+ ret = phy_set_mode_ext(port->slave.ifphy, PHY_MODE_ETHERNET, port->slave.phy_if);
+ if (ret)
+ goto of_node_put;
+
+ ret = of_get_mac_address(port_np, port->slave.mac_addr);
+ if (ret) {
+ am65_cpsw_am654_get_efuse_macid(port_np,
+ port->port_id,
+ port->slave.mac_addr);
+ if (!is_valid_ether_addr(port->slave.mac_addr)) {
+ eth_random_addr(port->slave.mac_addr);
+ dev_err(dev, "Use random MAC address\n");
+ }
+ }
+ }
+ of_node_put(node);
+
+ /* make sure at least one external port is enabled */
+ if (!(~common->disabled_ports_mask & GENMASK(common->port_num, 1))) {
+ dev_err(dev, "No external ports are available\n");
+ return -ENODEV;
+ }
+
+ return 0;
+
+of_node_put:
+ of_node_put(port_np);
+ of_node_put(node);
+ return ret;
+}
+
+static void am65_cpsw_pcpu_stats_free(void *data)
+{
+ struct am65_cpsw_ndev_stats __percpu *stats = data;
+
+ free_percpu(stats);
+}
+
+static void am65_cpsw_nuss_phylink_cleanup(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->slave.phylink)
+ phylink_destroy(port->slave.phylink);
+ }
+}
+
+static int
+am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx)
+{
+ struct am65_cpsw_ndev_priv *ndev_priv;
+ struct device *dev = common->dev;
+ struct am65_cpsw_port *port;
+ struct phylink *phylink;
+ int ret;
+
+ port = &common->ports[port_idx];
+
+ if (port->disabled)
+ return 0;
+
+ /* alloc netdev */
+ port->ndev = devm_alloc_etherdev_mqs(common->dev,
+ sizeof(struct am65_cpsw_ndev_priv),
+ AM65_CPSW_MAX_TX_QUEUES,
+ AM65_CPSW_MAX_RX_QUEUES);
+ if (!port->ndev) {
+ dev_err(dev, "error allocating slave net_device %u\n",
+ port->port_id);
+ return -ENOMEM;
+ }
+
+ ndev_priv = netdev_priv(port->ndev);
+ ndev_priv->port = port;
+ ndev_priv->msg_enable = AM65_CPSW_DEBUG;
+ SET_NETDEV_DEV(port->ndev, dev);
+
+ eth_hw_addr_set(port->ndev, port->slave.mac_addr);
+
+ port->ndev->min_mtu = AM65_CPSW_MIN_PACKET_SIZE;
+ port->ndev->max_mtu = AM65_CPSW_MAX_PACKET_SIZE -
+ (VLAN_ETH_HLEN + ETH_FCS_LEN);
+ port->ndev->hw_features = NETIF_F_SG |
+ NETIF_F_RXCSUM |
+ NETIF_F_HW_CSUM |
+ NETIF_F_HW_TC;
+ port->ndev->features = port->ndev->hw_features |
+ NETIF_F_HW_VLAN_CTAG_FILTER;
+ port->ndev->vlan_features |= NETIF_F_SG;
+ port->ndev->netdev_ops = &am65_cpsw_nuss_netdev_ops;
+ port->ndev->ethtool_ops = &am65_cpsw_ethtool_ops_slave;
+
+ /* Configuring Phylink */
+ port->slave.phylink_config.dev = &port->ndev->dev;
+ port->slave.phylink_config.type = PHYLINK_NETDEV;
+ port->slave.phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD;
+
+ if (phy_interface_mode_is_rgmii(port->slave.phy_if)) {
+ phy_interface_set_rgmii(port->slave.phylink_config.supported_interfaces);
+ } else if (port->slave.phy_if == PHY_INTERFACE_MODE_RMII) {
+ __set_bit(PHY_INTERFACE_MODE_RMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else if (common->pdata.extra_modes & BIT(port->slave.phy_if)) {
+ __set_bit(PHY_INTERFACE_MODE_QSGMII,
+ port->slave.phylink_config.supported_interfaces);
+ } else {
+ dev_err(dev, "selected phy-mode is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ phylink = phylink_create(&port->slave.phylink_config,
+ of_node_to_fwnode(port->slave.phy_node),
+ port->slave.phy_if,
+ &am65_cpsw_phylink_mac_ops);
+ if (IS_ERR(phylink))
+ return PTR_ERR(phylink);
+
+ port->slave.phylink = phylink;
+
+ /* Disable TX checksum offload by default due to HW bug */
+ if (common->pdata.quirks & AM65_CPSW_QUIRK_I2027_NO_TX_CSUM)
+ port->ndev->features &= ~NETIF_F_HW_CSUM;
+
+ ndev_priv->stats = netdev_alloc_pcpu_stats(struct am65_cpsw_ndev_stats);
+ if (!ndev_priv->stats)
+ return -ENOMEM;
+
+ ret = devm_add_action_or_reset(dev, am65_cpsw_pcpu_stats_free,
+ ndev_priv->stats);
+ if (ret)
+ dev_err(dev, "failed to add percpu stat free action %d\n", ret);
+
+ if (!common->dma_ndev)
+ common->dma_ndev = port->ndev;
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_init_ndevs(struct am65_cpsw_common *common)
+{
+ int ret;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ ret = am65_cpsw_nuss_init_port_ndev(common, i);
+ if (ret)
+ return ret;
+ }
+
+ netif_napi_add(common->dma_ndev, &common->napi_rx,
+ am65_cpsw_nuss_rx_poll);
+
+ return ret;
+}
+
+static int am65_cpsw_nuss_ndev_add_tx_napi(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ int i, ret = 0;
+
+ for (i = 0; i < common->tx_ch_num; i++) {
+ struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[i];
+
+ netif_napi_add_tx(common->dma_ndev, &tx_chn->napi_tx,
+ am65_cpsw_nuss_tx_poll);
+
+ ret = devm_request_irq(dev, tx_chn->irq,
+ am65_cpsw_nuss_tx_irq,
+ IRQF_TRIGGER_HIGH,
+ tx_chn->tx_chn_name, tx_chn);
+ if (ret) {
+ dev_err(dev, "failure requesting tx%u irq %u, %d\n",
+ tx_chn->id, tx_chn->irq, ret);
+ goto err;
+ }
+ }
+
+err:
+ return ret;
+}
+
+static void am65_cpsw_nuss_cleanup_ndev(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_port *port;
+ int i;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+ if (port->ndev && port->ndev->reg_state == NETREG_REGISTERED)
+ unregister_netdev(port->ndev);
+ }
+}
+
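+/* offload_fwd_mark is set on all ports only when every enabled external
+ * port has joined the same HW bridge; otherwise it is cleared so the
+ * bridge keeps forwarding in software.
+ */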
+static void am65_cpsw_port_offload_fwd_mark_update(struct am65_cpsw_common *common)
+{
+ int set_val = 0;
+ int i;
+
+ if (common->br_members == (GENMASK(common->port_num, 1) & ~common->disabled_ports_mask))
+ set_val = 1;
+
+ dev_dbg(common->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 1; i <= common->port_num; i++) {
+ struct am65_cpsw_port *port = am65_common_get_port(common, i);
+ struct am65_cpsw_ndev_priv *priv;
+
+ if (!port->ndev)
+ continue;
+
+ priv = am65_ndev_to_priv(port->ndev);
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+bool am65_cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &am65_cpsw_nuss_netdev_ops) {
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+
+ return !common->is_emac_mode;
+ }
+
+ return false;
+}
+
+static int am65_cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+ int err;
+
+ if (!common->br_members) {
+ common->hw_bridge_dev = br_ndev;
+ } else {
+ /* Adding the port to a second bridge is unsupported */
+ if (common->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ return err;
+
+ common->br_members |= BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ return NOTIFY_DONE;
+}
+
+static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
+
+ switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
+
+ common->br_members &= ~BIT(priv->port->port_id);
+
+ am65_cpsw_port_offload_fwd_mark_update(common);
+
+ if (!common->br_members)
+ common->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int am65_cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = am65_cpsw_netdevice_port_link(ndev,
+ info->upper_dev,
+ extack);
+ else
+ am65_cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static int am65_cpsw_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return 0;
+
+ cpsw->am65_cpsw_netdevice_nb.notifier_call = &am65_cpsw_netdevice_event;
+ ret = register_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = am65_cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void am65_cpsw_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ if (AM65_CPSW_IS_CPSW2G(cpsw) ||
+ !IS_REACHABLE(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ return;
+
+ am65_cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw->am65_cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops am65_cpsw_devlink_ops = {};
+
+static void am65_cpsw_init_stp_ale_entry(struct am65_cpsw_common *cpsw)
+{
+ cpsw_ale_add_mcast(cpsw->ale, eth_stp_addr, ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void am65_cpsw_init_host_port_switch(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(common->default_vlan, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ am65_cpsw_init_stp_ale_entry(common);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(common->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void am65_cpsw_init_host_port_emac(struct am65_cpsw_common *common)
+{
+ struct am65_cpsw_host *host = am65_common_get_host(common);
+
+ writel(0, host->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(common->dev, "unset P0_UNI_FLOOD\n");
+
+ /* learning makes no sense in multi-mac mode */
+ cpsw_ale_control_set(common->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static int am65_cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *common = dl_priv->common;
+
+ dev_dbg(common->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !common->is_emac_mode;
+
+ return 0;
+}
+
+static void am65_cpsw_init_port_emac_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *common = port->common;
+ u32 port_mask;
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ if (slave->mac_only)
+ /* enable mac-only mode on port */
+ cpsw_ale_control_set(common->ale, port->port_id,
+ ALE_PORT_MACONLY, 1);
+
+ cpsw_ale_control_set(common->ale, port->port_id, ALE_PORT_NOLEARN, 1);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_ucast(common->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE, slave->port_vlan);
+ cpsw_ale_add_mcast(common->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan, ALE_MCAST_FWD_2);
+}
+
+static void am65_cpsw_init_port_switch_ale(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_slave_data *slave = &port->slave;
+ struct am65_cpsw_common *cpsw = port->common;
+ u32 port_mask;
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_NOLEARN, 0);
+
+ cpsw_ale_add_ucast(cpsw->ale, port->ndev->dev_addr,
+ HOST_PORT_NUM, ALE_SECURE | ALE_BLOCKED | ALE_VLAN,
+ slave->port_vlan);
+
+ port_mask = BIT(port->port_id) | ALE_PORT_HOST;
+
+ cpsw_ale_add_mcast(cpsw->ale, port->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+
+ writel(slave->port_vlan, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_MACONLY, 0);
+}
+
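+/* devlink "switch_mode" runtime parameter handler. Switching between
+ * multi-MAC (emac) and switchdev mode is done under rtnl_lock; if any port
+ * is up, the ALE table is flushed and the host and slave ports are
+ * re-programmed for the selected mode.
+ */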
+static int am65_cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct am65_cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct am65_cpsw_common *cpsw = dl_priv->common;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != AM65_CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->is_emac_mode)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from bridge before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ cpsw->is_emac_mode = !switch_en;
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+
+ if (!sl_ndev)
+ continue;
+
+ slave = am65_ndev_to_slave(sl_ndev);
+ if (switch_en)
+ slave->port_vlan = cpsw->default_vlan;
+ else
+ slave->port_vlan = 0;
+ }
+
+ goto exit;
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, HOST_PORT_NUM, ALE_AGEOUT);
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ am65_cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_slave_data *slave;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ slave = am65_ndev_to_slave(sl_ndev);
+ slave->port_vlan = cpsw->default_vlan;
+
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_switch_ale(port);
+ }
+
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ am65_cpsw_init_host_port_emac(cpsw);
+
+ for (i = 0; i < cpsw->port_num; i++) {
+ struct net_device *sl_ndev = cpsw->ports[i].ndev;
+ struct am65_cpsw_port *port;
+
+ if (!sl_ndev)
+ continue;
+
+ port = am65_ndev_to_port(sl_ndev);
+ port->slave.port_vlan = 0;
+ if (netif_running(sl_ndev))
+ am65_cpsw_init_port_emac_ale(port);
+ }
+ }
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_BYPASS, 0);
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static const struct devlink_param am65_cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(AM65_CPSW_DL_PARAM_SWITCH_MODE, "switch_mode",
+ DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ am65_cpsw_dl_switch_mode_get,
+ am65_cpsw_dl_switch_mode_set, NULL),
+};
+
+static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common)
+{
+ struct devlink_port_attrs attrs = {};
+ struct am65_cpsw_devlink *dl_priv;
+ struct device *dev = common->dev;
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int ret = 0;
+ int i;
+
+ common->devlink =
+ devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev);
+ if (!common->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(common->devlink);
+ dl_priv->common = common;
+
+ /* Provide a devlink hook to switch modes when multiple external ports
+ * are present and the NUSS switchdev driver is enabled.
+ */
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) {
+ ret = devlink_params_register(common->devlink,
+ am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "devlink params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+ }
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ if (port->ndev)
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ else
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
+ attrs.phys.port_number = port->port_id;
+ attrs.switch_id.id_len = sizeof(resource_size_t);
+ memcpy(attrs.switch_id.id, common->switch_id, attrs.switch_id.id_len);
+ devlink_port_attrs_set(dl_port, &attrs);
+
+ ret = devlink_port_register(common->devlink, dl_port, port->port_id);
+ if (ret) {
+ dev_err(dev, "devlink_port reg fail for port %d, ret:%d\n",
+ port->port_id, ret);
+ goto dl_port_unreg;
+ }
+ }
+ devlink_register(common->devlink);
+ return ret;
+
+dl_port_unreg:
+ for (i = i - 1; i >= 1; i--) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ devlink_port_unregister(dl_port);
+ }
+dl_unreg:
+ devlink_free(common->devlink);
+ return ret;
+}
+
+static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common)
+{
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int i;
+
+ devlink_unregister(common->devlink);
+
+ for (i = 1; i <= common->port_num; i++) {
+ port = am65_common_get_port(common, i);
+ dl_port = &port->devlink_port;
+
+ devlink_port_unregister(dl_port);
+ }
+
+ if (!AM65_CPSW_IS_CPSW2G(common) &&
+ IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV))
+ devlink_params_unregister(common->devlink,
+ am65_cpsw_devlink_params,
+ ARRAY_SIZE(am65_cpsw_devlink_params));
+
+ devlink_free(common->devlink);
+}
+
+static int am65_cpsw_nuss_register_ndevs(struct am65_cpsw_common *common)
+{
+ struct device *dev = common->dev;
+ struct devlink_port *dl_port;
+ struct am65_cpsw_port *port;
+ int ret = 0, i;
+
+ ret = am65_cpsw_nuss_ndev_add_tx_napi(common);
+ if (ret)
+ return ret;
+
+ ret = devm_request_irq(dev, common->rx_chns.irq,
+ am65_cpsw_nuss_rx_irq,
+ IRQF_TRIGGER_HIGH, dev_name(dev), common);
+ if (ret) {
+ dev_err(dev, "failure requesting rx irq %u, %d\n",
+ common->rx_chns.irq, ret);
+ return ret;
+ }
+
+ ret = am65_cpsw_nuss_register_devlink(common);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < common->port_num; i++) {
+ port = &common->ports[i];
+
+ if (!port->ndev)
+ continue;
+
+ ret = register_netdev(port->ndev);
+ if (ret) {
+ dev_err(dev, "error registering slave net device%i %d\n",
+ i, ret);
+ goto err_cleanup_ndev;
+ }
+
+ dl_port = &port->devlink_port;
+ devlink_port_type_eth_set(dl_port, port->ndev);
+ }
+
+ ret = am65_cpsw_register_notifiers(common);
+ if (ret)
+ goto err_cleanup_ndev;
+
+ /* can't auto unregister ndev using devm_add_action() due to
+ * devres release sequence in DD core for DMA
+ */
+
+ return 0;
+
+err_cleanup_ndev:
+ am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_unregister_devlink(common);
+
+ return ret;
+}
+
+int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx)
+{
+ int ret;
+
+ common->tx_ch_num = num_tx;
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_nuss_ndev_add_tx_napi(common);
+}
+
+struct am65_cpsw_soc_pdata {
+ u32 quirks_dis;
+};
+
+static const struct am65_cpsw_soc_pdata am65x_soc_sr2_0 = {
+ .quirks_dis = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+};
+
+static const struct soc_device_attribute am65_cpsw_socinfo[] = {
+ { .family = "AM65X",
+ .revision = "SR2.0",
+ .data = &am65x_soc_sr2_0
+ },
+ {/* sentinel */}
+};
+
+static const struct am65_cpsw_pdata am65x_sr1_0 = {
+ .quirks = AM65_CPSW_QUIRK_I2027_NO_TX_CSUM,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+};
+
+static const struct am65_cpsw_pdata j721e_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am65x-cpsw2g",
+ .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE,
+};
+
+static const struct am65_cpsw_pdata am64x_cpswxg_pdata = {
+ .quirks = AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+};
+
+static const struct am65_cpsw_pdata j7200_cpswxg_pdata = {
+ .quirks = 0,
+ .ale_dev_id = "am64-cpswxg",
+ .fdqring_mode = K3_RINGACC_RING_MODE_RING,
+ .extra_modes = BIT(PHY_INTERFACE_MODE_QSGMII),
+};
+
+static const struct of_device_id am65_cpsw_nuss_of_mtable[] = {
+ { .compatible = "ti,am654-cpsw-nuss", .data = &am65x_sr1_0},
+ { .compatible = "ti,j721e-cpsw-nuss", .data = &j721e_pdata},
+ { .compatible = "ti,am642-cpsw-nuss", .data = &am64x_cpswxg_pdata},
+ { .compatible = "ti,j7200-cpswxg-nuss", .data = &j7200_cpswxg_pdata},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, am65_cpsw_nuss_of_mtable);
+
+static void am65_cpsw_nuss_apply_socinfo(struct am65_cpsw_common *common)
+{
+ const struct soc_device_attribute *soc;
+
+ soc = soc_device_match(am65_cpsw_socinfo);
+ if (soc && soc->data) {
+ const struct am65_cpsw_soc_pdata *socdata = soc->data;
+
+ /* disable quirks */
+ common->pdata.quirks &= ~socdata->quirks_dis;
+ }
+}
+
+static int am65_cpsw_nuss_probe(struct platform_device *pdev)
+{
+ struct cpsw_ale_params ale_params = { 0 };
+ const struct of_device_id *of_id;
+ struct device *dev = &pdev->dev;
+ struct am65_cpsw_common *common;
+ struct device_node *node;
+ struct resource *res;
+ struct clk *clk;
+ u64 id_temp;
+ int ret, i;
+
+ common = devm_kzalloc(dev, sizeof(struct am65_cpsw_common), GFP_KERNEL);
+ if (!common)
+ return -ENOMEM;
+ common->dev = dev;
+
+ of_id = of_match_device(am65_cpsw_nuss_of_mtable, dev);
+ if (!of_id)
+ return -EINVAL;
+ common->pdata = *(const struct am65_cpsw_pdata *)of_id->data;
+
+ am65_cpsw_nuss_apply_socinfo(common);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpsw_nuss");
+ common->ss_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(common->ss_base))
+ return PTR_ERR(common->ss_base);
+ common->cpsw_base = common->ss_base + AM65_CPSW_CPSW_NU_BASE;
+ /* Use device's physical base address as switch id */
+ id_temp = cpu_to_be64(res->start);
+ memcpy(common->switch_id, &id_temp, sizeof(res->start));
+
+ node = of_get_child_by_name(dev->of_node, "ethernet-ports");
+ if (!node)
+ return -ENOENT;
+ common->port_num = of_get_child_count(node);
+ of_node_put(node);
+ if (common->port_num < 1 || common->port_num > AM65_CPSW_MAX_PORTS)
+ return -ENOENT;
+
+ common->rx_flow_id_base = -1;
+ init_completion(&common->tdown_complete);
+ common->tx_ch_num = 1;
+ common->pf_p0_rx_ptype_rrobin = false;
+ common->default_vlan = 1;
+
+ common->ports = devm_kcalloc(dev, common->port_num,
+ sizeof(*common->ports),
+ GFP_KERNEL);
+ if (!common->ports)
+ return -ENOMEM;
+
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk))
+ return dev_err_probe(dev, PTR_ERR(clk), "getting fck clock\n");
+ common->bus_freq = clk_get_rate(clk);
+
+ pm_runtime_enable(dev);
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ node = of_get_child_by_name(dev->of_node, "mdio");
+ if (!node) {
+ dev_warn(dev, "MDIO node not found\n");
+ } else if (of_device_is_available(node)) {
+ struct platform_device *mdio_pdev;
+
+ mdio_pdev = of_platform_device_create(node, NULL, dev);
+ if (!mdio_pdev) {
+ ret = -ENODEV;
+ goto err_pm_clear;
+ }
+
+ common->mdio_dev = &mdio_pdev->dev;
+ }
+ of_node_put(node);
+
+ am65_cpsw_nuss_get_ver(common);
+
+ /* init tx channels */
+ ret = am65_cpsw_nuss_init_tx_chns(common);
+ if (ret)
+ goto err_of_clear;
+ ret = am65_cpsw_nuss_init_rx_chns(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_init_host_p(common);
+ if (ret)
+ goto err_of_clear;
+
+ ret = am65_cpsw_nuss_init_slave_ports(common);
+ if (ret)
+ goto err_of_clear;
+
+ /* init common data */
+ ale_params.dev = dev;
+ ale_params.ale_ageout = AM65_CPSW_ALE_AGEOUT_DEFAULT;
+ ale_params.ale_ports = common->port_num + 1;
+ ale_params.ale_regs = common->cpsw_base + AM65_CPSW_NU_ALE_BASE;
+ ale_params.dev_id = common->pdata.ale_dev_id;
+ ale_params.bus_freq = common->bus_freq;
+
+ common->ale = cpsw_ale_create(&ale_params);
+ if (IS_ERR(common->ale)) {
+ dev_err(dev, "error initializing ale engine\n");
+ ret = PTR_ERR(common->ale);
+ goto err_of_clear;
+ }
+
+ ret = am65_cpsw_init_cpts(common);
+ if (ret)
+ goto err_of_clear;
+
+ /* init ports */
+ for (i = 0; i < common->port_num; i++)
+ am65_cpsw_nuss_slave_disable_unused(&common->ports[i]);
+
+ dev_set_drvdata(dev, common);
+
+ common->is_emac_mode = true;
+
+ ret = am65_cpsw_nuss_init_ndevs(common);
+ if (ret)
+ goto err_free_phylink;
+
+ ret = am65_cpsw_nuss_register_ndevs(common);
+ if (ret)
+ goto err_free_phylink;
+
+ pm_runtime_put(dev);
+ return 0;
+
+err_free_phylink:
+ am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
+err_of_clear:
+ if (common->mdio_dev)
+ of_platform_device_destroy(common->mdio_dev, NULL);
+err_pm_clear:
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int am65_cpsw_nuss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct am65_cpsw_common *common;
+ int ret;
+
+ common = dev_get_drvdata(dev);
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ am65_cpsw_unregister_devlink(common);
+ am65_cpsw_unregister_notifiers(common);
+
+ /* must unregister ndevs here because DD release_driver routine calls
+ * dma_deconfigure(dev) before devres_release_all(dev)
+ */
+ am65_cpsw_nuss_cleanup_ndev(common);
+ am65_cpsw_nuss_phylink_cleanup(common);
+ am65_cpts_release(common->cpts);
+
+ if (common->mdio_dev)
+ of_platform_device_destroy(common->mdio_dev, NULL);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_driver am65_cpsw_nuss_driver = {
+ .driver = {
+ .name = AM65_CPSW_DRV_NAME,
+ .of_match_table = am65_cpsw_nuss_of_mtable,
+ },
+ .probe = am65_cpsw_nuss_probe,
+ .remove = am65_cpsw_nuss_remove,
+};
+
+module_platform_driver(am65_cpsw_nuss_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI AM65 CPSW Ethernet driver");
diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
new file mode 100644
index 000000000..a9610e130
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h
@@ -0,0 +1,193 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ */
+
+#ifndef AM65_CPSW_NUSS_H_
+#define AM65_CPSW_NUSS_H_
+
+#include <linux/if_ether.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/phylink.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <net/devlink.h>
+#include "am65-cpsw-qos.h"
+
+struct am65_cpts;
+
+#define HOST_PORT_NUM 0
+
+#define AM65_CPSW_MAX_TX_QUEUES 8
+#define AM65_CPSW_MAX_RX_QUEUES 1
+#define AM65_CPSW_MAX_RX_FLOWS 1
+
+#define AM65_CPSW_PORT_VLAN_REG_OFFSET 0x014
+
+struct am65_cpsw_slave_data {
+ bool mac_only;
+ struct cpsw_sl *mac_sl;
+ struct device_node *phy_node;
+ phy_interface_t phy_if;
+ struct phy *ifphy;
+ bool rx_pause;
+ bool tx_pause;
+ u8 mac_addr[ETH_ALEN];
+ int port_vlan;
+ struct phylink *phylink;
+ struct phylink_config phylink_config;
+};
+
+struct am65_cpsw_port {
+ struct am65_cpsw_common *common;
+ struct net_device *ndev;
+ const char *name;
+ u32 port_id;
+ void __iomem *port_base;
+ void __iomem *sgmii_base;
+ void __iomem *stat_base;
+ void __iomem *fetch_ram_base;
+ bool disabled;
+ struct am65_cpsw_slave_data slave;
+ bool tx_ts_enabled;
+ bool rx_ts_enabled;
+ struct am65_cpsw_qos qos;
+ struct devlink_port devlink_port;
+};
+
+struct am65_cpsw_host {
+ struct am65_cpsw_common *common;
+ void __iomem *port_base;
+ void __iomem *stat_base;
+};
+
+struct am65_cpsw_tx_chn {
+ struct device *dma_dev;
+ struct napi_struct napi_tx;
+ struct am65_cpsw_common *common;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_tx_channel *tx_chn;
+ spinlock_t lock; /* protect TX rings in multi-port mode */
+ int irq;
+ u32 id;
+ u32 descs_num;
+ char tx_chn_name[128];
+};
+
+struct am65_cpsw_rx_chn {
+ struct device *dev;
+ struct device *dma_dev;
+ struct k3_cppi_desc_pool *desc_pool;
+ struct k3_udma_glue_rx_channel *rx_chn;
+ u32 descs_num;
+ int irq;
+};
+
+#define AM65_CPSW_QUIRK_I2027_NO_TX_CSUM BIT(0)
+#define AM64_CPSW_QUIRK_DMA_RX_TDOWN_IRQ BIT(1)
+
+struct am65_cpsw_pdata {
+ u32 quirks;
+ u64 extra_modes;
+ enum k3_ring_mode fdqring_mode;
+ const char *ale_dev_id;
+};
+
+enum cpsw_devlink_param_id {
+ AM65_CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ AM65_CPSW_DL_PARAM_SWITCH_MODE,
+};
+
+struct am65_cpsw_devlink {
+ struct am65_cpsw_common *common;
+};
+
+struct am65_cpsw_common {
+ struct device *dev;
+ struct device *mdio_dev;
+ struct am65_cpsw_pdata pdata;
+
+ void __iomem *ss_base;
+ void __iomem *cpsw_base;
+
+ u32 port_num;
+ struct am65_cpsw_host host;
+ struct am65_cpsw_port *ports;
+ u32 disabled_ports_mask;
+ struct net_device *dma_ndev;
+
+ int usage_count; /* number of opened ports */
+ struct cpsw_ale *ale;
+ int tx_ch_num;
+ u32 rx_flow_id_base;
+
+ struct am65_cpsw_tx_chn tx_chns[AM65_CPSW_MAX_TX_QUEUES];
+ struct completion tdown_complete;
+ atomic_t tdown_cnt;
+
+ struct am65_cpsw_rx_chn rx_chns;
+ struct napi_struct napi_rx;
+
+ bool rx_irq_disabled;
+
+ u32 nuss_ver;
+ u32 cpsw_ver;
+ unsigned long bus_freq;
+ bool pf_p0_rx_ptype_rrobin;
+ struct am65_cpts *cpts;
+ int est_enabled;
+
+ bool is_emac_mode;
+ u16 br_members;
+ int default_vlan;
+ struct devlink *devlink;
+ struct net_device *hw_bridge_dev;
+ struct notifier_block am65_cpsw_netdevice_nb;
+ unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN];
+};
+
+struct am65_cpsw_ndev_stats {
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_packets;
+ u64 rx_bytes;
+ struct u64_stats_sync syncp;
+};
+
+struct am65_cpsw_ndev_priv {
+ u32 msg_enable;
+ struct am65_cpsw_port *port;
+ struct am65_cpsw_ndev_stats __percpu *stats;
+ bool offload_fwd_mark;
+};
+
+#define am65_ndev_to_priv(ndev) \
+ ((struct am65_cpsw_ndev_priv *)netdev_priv(ndev))
+#define am65_ndev_to_port(ndev) (am65_ndev_to_priv(ndev)->port)
+#define am65_ndev_to_common(ndev) (am65_ndev_to_port(ndev)->common)
+#define am65_ndev_to_slave(ndev) (&am65_ndev_to_port(ndev)->slave)
+
+#define am65_common_get_host(common) (&(common)->host)
+#define am65_common_get_port(common, id) (&(common)->ports[(id) - 1])
+
+#define am65_cpsw_napi_to_common(pnapi) \
+ container_of(pnapi, struct am65_cpsw_common, napi_rx)
+#define am65_cpsw_napi_to_tx_chn(pnapi) \
+ container_of(pnapi, struct am65_cpsw_tx_chn, napi_tx)
+
+#define AM65_CPSW_DRV_NAME "am65-cpsw-nuss"
+
+#define AM65_CPSW_IS_CPSW2G(common) ((common)->port_num == 1)
+
+extern const struct ethtool_ops am65_cpsw_ethtool_ops_slave;
+
+void am65_cpsw_nuss_adjust_link(struct net_device *ndev);
+void am65_cpsw_nuss_set_p0_ptype(struct am65_cpsw_common *common);
+void am65_cpsw_nuss_remove_tx_chns(struct am65_cpsw_common *common);
+int am65_cpsw_nuss_update_tx_chns(struct am65_cpsw_common *common, int num_tx);
+
+bool am65_cpsw_port_dev_check(const struct net_device *dev);
+
+#endif /* AM65_CPSW_NUSS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.c b/drivers/net/ethernet/ti/am65-cpsw-qos.c
new file mode 100644
index 000000000..e16277189
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.c
@@ -0,0 +1,799 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments K3 AM65 Ethernet QoS submodule
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This Quality of Service (QoS) submodule implements:
+ * Enhanced Scheduled Traffic (EST - P802.1Qbv/D2.2)
+ */
+
+#include <linux/pm_runtime.h>
+#include <linux/time.h>
+#include <net/pkt_cls.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-qos.h"
+#include "am65-cpts.h"
+#include "cpsw_ale.h"
+
+#define AM65_CPSW_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_CTL 0x004
+#define AM65_CPSW_PN_REG_FIFO_STATUS 0x050
+#define AM65_CPSW_PN_REG_EST_CTL 0x060
+
+/* AM65_CPSW_REG_CTL register fields */
+#define AM65_CPSW_CTL_EST_EN BIT(18)
+
+/* AM65_CPSW_PN_REG_CTL register fields */
+#define AM65_CPSW_PN_CTL_EST_PORT_EN BIT(17)
+
+/* AM65_CPSW_PN_REG_EST_CTL register fields */
+#define AM65_CPSW_PN_EST_ONEBUF BIT(0)
+#define AM65_CPSW_PN_EST_BUFSEL BIT(1)
+#define AM65_CPSW_PN_EST_TS_EN BIT(2)
+#define AM65_CPSW_PN_EST_TS_FIRST BIT(3)
+#define AM65_CPSW_PN_EST_ONEPRI BIT(4)
+#define AM65_CPSW_PN_EST_TS_PRI_MSK GENMASK(7, 5)
+
+/* AM65_CPSW_PN_REG_FIFO_STATUS register fields */
+#define AM65_CPSW_PN_FST_TX_PRI_ACTIVE_MSK GENMASK(7, 0)
+#define AM65_CPSW_PN_FST_TX_E_MAC_ALLOW_MSK GENMASK(15, 8)
+#define AM65_CPSW_PN_FST_EST_CNT_ERR BIT(16)
+#define AM65_CPSW_PN_FST_EST_ADD_ERR BIT(17)
+#define AM65_CPSW_PN_FST_EST_BUFACT BIT(18)
+
+/* EST FETCH COMMAND RAM */
+#define AM65_CPSW_FETCH_RAM_CMD_NUM 0x80
+#define AM65_CPSW_FETCH_CNT_MSK GENMASK(21, 8)
+#define AM65_CPSW_FETCH_CNT_MAX (AM65_CPSW_FETCH_CNT_MSK >> 8)
+#define AM65_CPSW_FETCH_CNT_OFFSET 8
+#define AM65_CPSW_FETCH_ALLOW_MSK GENMASK(7, 0)
+#define AM65_CPSW_FETCH_ALLOW_MAX AM65_CPSW_FETCH_ALLOW_MSK
+
+enum timer_act {
+	TACT_PROG,	/* need to program the timer */
+	TACT_NEED_STOP,	/* need to stop the timer first */
+	TACT_SKIP_PROG,	/* only the buffer can be updated */
+};
+
+static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
+{
+ return port->qos.est_oper || port->qos.est_admin;
+}
+
+static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
+{
+ u32 val;
+
+ val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
+
+ if (enable)
+ val |= AM65_CPSW_CTL_EST_EN;
+ else
+ val &= ~AM65_CPSW_CTL_EST_EN;
+
+ writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
+ common->est_enabled = enable;
+}
+
+static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
+{
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
+ if (enable)
+ val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
+ else
+ val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
+}
+
+/* target new EST RAM buffer, actual toggle happens after cycle completion */
+static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
+ int buf_num)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ if (buf_num)
+ val |= AM65_CPSW_PN_EST_BUFSEL;
+ else
+ val &= ~AM65_CPSW_PN_EST_BUFSEL;
+
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+}
+
+/* am65_cpsw_port_est_is_swapped() - Indicate whether the h/w has completed
+ * the admin -> oper transition
+ *
+ * Return true if the transition has already happened, i.e. oper equals admin
+ * and the buffer numbers match (est_oper->buf matches est_admin->buf).
+ * Return false if the transition is still pending, i.e. oper does not equal
+ * admin (a previous admin command is still waiting to become oper and
+ * est_oper->buf does not match est_admin->buf).
+ */
+static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
+ int *admin)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
+ *oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ *admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);
+
+ return *admin == *oper;
+}
+
+/* am65_cpsw_port_est_get_free_buf_num() - Get a free buffer number for
+ * admin to program the new schedule into.
+ *
+ * Logic:
+ * If oper is the same as admin, return the other buffer (!oper) as the admin
+ * buffer. If they differ, the hardware is still transitioning admin -> oper,
+ * so let the current oper buffer continue by writing its number back to the
+ * EST_BUFSEL bit of the EST CTL register. On the second iteration they will
+ * match and the function returns. The buffer the commands are actually
+ * written to is selected later, just before the schedule is updated.
+ */
+static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
+{
+ int oper, admin;
+ int roll = 2;
+
+ while (roll--) {
+ if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return !oper;
+
+		/* oper and admin differ, so hold off a new transition (the
+		 * in-flight buffer memory must not be touched) by targeting
+		 * the same oper buffer.
+		 */
+ am65_cpsw_port_est_assign_buf_num(ndev, oper);
+
+ dev_info(&ndev->dev,
+ "Prev. EST admin cycle is in transit %d -> %d\n",
+ oper, admin);
+ }
+
+ return admin;
+}
+
+static void am65_cpsw_admin_to_oper(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = port->qos.est_admin;
+ port->qos.est_admin = NULL;
+}
+
+static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 val;
+
+ val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+ val &= ~AM65_CPSW_PN_EST_ONEBUF;
+ writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
+
+ est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);
+
+	/* getting the old oper buffer back means h/w swapped buffers while configuring */
+ if (port->qos.est_oper && port->qos.est_admin &&
+ est_new->buf == port->qos.est_oper->buf)
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+static void am65_cpsw_est_set(struct net_device *ndev, int enable)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ int common_enable = 0;
+ int i;
+
+ am65_cpsw_port_est_enable(port, enable);
+
+ for (i = 0; i < common->port_num; i++)
+ common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);
+
+ common_enable |= enable;
+ am65_cpsw_est_enable(common, common_enable);
+}
+
+/* This update should be called from any routine that needs the real state of
+ * the admin -> oper transition, in particular from generic code that reports
+ * the real state back to the taprio qdisc.
+ */
+static void am65_cpsw_est_update_state(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int oper, admin;
+
+ if (!port->qos.est_admin)
+ return;
+
+ if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
+ return;
+
+ am65_cpsw_admin_to_oper(ndev);
+}
+
+/* The fetch command count is a number of bytes in Gigabit mode or nibbles in
+ * 10/100 Mb mode. So, given the link speed and a time in ns, convert the ns
+ * value into the number of bytes/nibbles that can be transmitted at that
+ * speed.
+ */
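+/* For example, an 8000 ns interval at 1000 Mbit/s gives
+ * DIV_ROUND_UP(8000 * 1000, 8000) = 1000 byte times, while at 100 Mbit/s the
+ * doubled value gives 200 nibble times (100 bytes).
+ */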
+static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
+{
+ u64 temp;
+
+ temp = ns * link_speed;
+ if (link_speed < SPEED_1000)
+ temp <<= 1;
+
+ return DIV_ROUND_UP(temp, 8 * 1000);
+}
+
+static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
+ int fetch_cnt,
+ int fetch_allow)
+{
+ u32 prio_mask, cmd_fetch_cnt, cmd;
+
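+	/* Each 32-bit fetch RAM command packs the fetch count into bits [21:8]
+	 * and the priority/gate allow mask into bits [7:0]; intervals longer
+	 * than AM65_CPSW_FETCH_CNT_MAX are split across consecutive commands.
+	 */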
+ do {
+ if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
+ fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
+ cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
+ } else {
+ cmd_fetch_cnt = fetch_cnt;
+ /* fetch count can't be less than 16? */
+ if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
+ cmd_fetch_cnt = 16;
+
+ fetch_cnt = 0;
+ }
+
+ prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
+ cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
+
+ writel(cmd, addr);
+ addr += 4;
+ } while (fetch_cnt);
+
+ return addr;
+}
+
+static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
+ struct tc_taprio_qopt_offload *taprio,
+ int link_speed)
+{
+ int i, cmd_cnt, cmd_sum = 0;
+ u32 fetch_cnt;
+
+ for (i = 0; i < taprio->num_entries; i++) {
+ if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
+ dev_err(&ndev->dev, "Only SET command is supported");
+ return -EINVAL;
+ }
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
+ link_speed);
+
+ cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
+ if (!cmd_cnt)
+ cmd_cnt++;
+
+ cmd_sum += cmd_cnt;
+
+ if (!fetch_cnt)
+ break;
+ }
+
+ return cmd_sum;
+}
+
+static int am65_cpsw_est_check_scheds(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int cmd_num;
+
+ cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
+ port->qos.link_speed);
+ if (cmd_num < 0)
+ return cmd_num;
+
+ if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
+ dev_err(&ndev->dev, "No fetch RAM");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
+ void __iomem *ram_addr, *max_ram_addr;
+ struct tc_taprio_sched_entry *entry;
+ int i, ram_size;
+
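+	/* The fetch RAM holds AM65_CPSW_FETCH_RAM_CMD_NUM 32-bit commands,
+	 * split into two ping-pong buffers of AM65_CPSW_FETCH_RAM_CMD_NUM / 2
+	 * commands each, i.e. ram_size bytes per buffer.
+	 */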
+ ram_addr = port->fetch_ram_base;
+ ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
+ ram_addr += est_new->buf * ram_size;
+
+ max_ram_addr = ram_size + ram_addr;
+ for (i = 0; i < est_new->taprio.num_entries; i++) {
+ entry = &est_new->taprio.entries[i];
+
+ fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
+ port->qos.link_speed);
+ fetch_allow = entry->gate_mask;
+ if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
+ dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
+ fetch_allow);
+
+ ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
+ fetch_allow);
+
+ if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
+ dev_info(&ndev->dev,
+ "next scheds after %d have no impact", i + 1);
+ break;
+ }
+
+ all_fetch_allow |= fetch_allow;
+ }
+
+	/* end command: enable the non-timed queues for any time left over in the cycle */
+ if (ram_addr < max_ram_addr)
+ writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
+}
+
+/*
+ * Enable ESTf periodic output, set cycle start time and interval.
+ */
+static int am65_cpsw_timer_set(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+ struct am65_cpts *cpts = common->cpts;
+ struct am65_cpts_estf_cfg cfg;
+
+ cfg.ns_period = est_new->taprio.cycle_time;
+ cfg.ns_start = est_new->taprio.base_time;
+
+ return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
+}
+
+static void am65_cpsw_timer_stop(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+
+ am65_cpts_estf_disable(cpts, port->port_id - 1);
+}
+
+static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpts *cpts = port->common->cpts;
+ u64 cur_time;
+ s64 diff;
+
+ if (!port->qos.est_oper)
+ return TACT_PROG;
+
+ taprio_new = &est_new->taprio;
+ taprio_oper = &port->qos.est_oper->taprio;
+
+ if (taprio_new->cycle_time != taprio_oper->cycle_time)
+ return TACT_NEED_STOP;
+
+	/* to avoid resetting the timer, take base_time from the oper taprio */
+ if (!taprio_new->base_time && taprio_oper)
+ taprio_new->base_time = taprio_oper->base_time;
+
+ if (taprio_new->base_time == taprio_oper->base_time)
+ return TACT_SKIP_PROG;
+
+ /* base times are cycle synchronized */
+ diff = taprio_new->base_time - taprio_oper->base_time;
+ diff = diff < 0 ? -diff : diff;
+ if (diff % taprio_new->cycle_time)
+ return TACT_NEED_STOP;
+
+ cur_time = am65_cpts_ns_gettime(cpts);
+ if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
+ return TACT_SKIP_PROG;
+
+ /* TODO: Admin schedule at future time is not currently supported */
+ return TACT_NEED_STOP;
+}
+
+static void am65_cpsw_stop_est(struct net_device *ndev)
+{
+ am65_cpsw_est_set(ndev, 0);
+ am65_cpsw_timer_stop(ndev);
+}
+
+static void am65_cpsw_purge_est(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ am65_cpsw_stop_est(ndev);
+
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+ devm_kfree(&ndev->dev, port->qos.est_oper);
+
+ port->qos.est_oper = NULL;
+ port->qos.est_admin = NULL;
+}
+
+static int am65_cpsw_configure_taprio(struct net_device *ndev,
+ struct am65_cpsw_est *est_new)
+{
+ struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
+ struct am65_cpts *cpts = common->cpts;
+ int ret = 0, tact = TACT_PROG;
+
+ am65_cpsw_est_update_state(ndev);
+
+ if (!est_new->taprio.enable) {
+ am65_cpsw_stop_est(ndev);
+ return ret;
+ }
+
+ ret = am65_cpsw_est_check_scheds(ndev, est_new);
+ if (ret < 0)
+ return ret;
+
+ tact = am65_cpsw_timer_act(ndev, est_new);
+ if (tact == TACT_NEED_STOP) {
+ dev_err(&ndev->dev,
+ "Can't toggle estf timer, stop taprio first");
+ return -EINVAL;
+ }
+
+ if (tact == TACT_PROG)
+ am65_cpsw_timer_stop(ndev);
+
+ if (!est_new->taprio.base_time)
+ est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);
+
+ am65_cpsw_port_est_get_buf_num(ndev, est_new);
+ am65_cpsw_est_set_sched_list(ndev, est_new);
+ am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);
+
+ am65_cpsw_est_set(ndev, est_new->taprio.enable);
+
+ if (tact == TACT_PROG) {
+ ret = am65_cpsw_timer_set(ndev, est_new);
+ if (ret) {
+ dev_err(&ndev->dev, "Failed to set cycle time");
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
+ struct tc_taprio_qopt_offload *to)
+{
+ int i;
+
+ *to = *from;
+ for (i = 0; i < from->num_entries; i++)
+ to->entries[i] = from->entries[i];
+}
+
+static int am65_cpsw_set_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct tc_taprio_qopt_offload *taprio = type_data;
+ struct am65_cpsw_est *est_new;
+ int ret = 0;
+
+ if (taprio->cycle_time_extension) {
+ dev_err(&ndev->dev, "Failed to set cycle time extension");
+ return -EOPNOTSUPP;
+ }
+
+ est_new = devm_kzalloc(&ndev->dev,
+ struct_size(est_new, taprio.entries, taprio->num_entries),
+ GFP_KERNEL);
+ if (!est_new)
+ return -ENOMEM;
+
+ am65_cpsw_cp_taprio(taprio, &est_new->taprio);
+ ret = am65_cpsw_configure_taprio(ndev, est_new);
+ if (!ret) {
+ if (taprio->enable) {
+ devm_kfree(&ndev->dev, port->qos.est_admin);
+
+ port->qos.est_admin = est_new;
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ am65_cpsw_purge_est(ndev);
+ }
+ } else {
+ devm_kfree(&ndev->dev, est_new);
+ }
+
+ return ret;
+}
+
+static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ ktime_t cur_time;
+ s64 delta;
+
+ port->qos.link_speed = link_speed;
+ if (!am65_cpsw_port_est_enabled(port))
+ return;
+
+ if (port->qos.link_down_time) {
+ cur_time = ktime_get();
+ delta = ktime_us_delta(cur_time, port->qos.link_down_time);
+ if (delta > USEC_PER_SEC) {
+ dev_err(&ndev->dev,
+ "Link has been lost too long, stopping TAS");
+ goto purge_est;
+ }
+ }
+
+ return;
+
+purge_est:
+ am65_cpsw_purge_est(ndev);
+}
+
+static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct am65_cpsw_common *common = port->common;
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return -ENODEV;
+
+ if (!netif_running(ndev)) {
+ dev_err(&ndev->dev, "interface is down, link speed unknown\n");
+ return -ENETDOWN;
+ }
+
+ if (common->pf_p0_rx_ptype_rrobin) {
+ dev_err(&ndev->dev,
+ "p0-rx-ptype-rrobin flag conflicts with taprio qdisc\n");
+ return -EINVAL;
+ }
+
+ if (port->qos.link_speed == SPEED_UNKNOWN)
+ return -ENOLINK;
+
+ return am65_cpsw_set_taprio(ndev, type_data);
+}
+
+static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
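+	/* a key/mask of 01:00:00:00:00:00 matches any multicast destination
+	 * MAC (only the multicast/group bit is significant)
+	 */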
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct am65_cpsw_qos *qos = &port->qos;
+ struct flow_match_eth_addrs match;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_bc_ratelimit.cookie = cls->cookie;
+ qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ qos->ale_mc_ratelimit.cookie = cls->cookie;
+ qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack,
+ "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
+{
+ struct am65_cpsw_qos *qos = &port->qos;
+
+ if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
+ qos->ale_bc_ratelimit.cookie = 0;
+ qos->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
+ }
+
+ if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
+ qos->ale_mc_ratelimit.cookie = 0;
+ qos->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
+ struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return am65_cpsw_qos_configure_clsflower(port, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return am65_cpsw_qos_delete_clsflower(port, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct am65_cpsw_port *port = cb_priv;
+
+ if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static LIST_HEAD(am65_cpsw_qos_block_cb_list);
+
+static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
+ am65_cpsw_qos_setup_tc_block_cb,
+ port, port, true);
+}
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_TAPRIO:
+ return am65_cpsw_setup_taprio(ndev, type_data);
+ case TC_SETUP_BLOCK:
+ return am65_cpsw_qos_setup_tc_block(ndev, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ am65_cpsw_est_link_up(ndev, link_speed);
+ port->qos.link_down_time = 0;
+}
+
+void am65_cpsw_qos_link_down(struct net_device *ndev)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+
+ if (!IS_ENABLED(CONFIG_TI_AM65_CPSW_TAS))
+ return;
+
+ if (!port->qos.link_down_time)
+ port->qos.link_down_time = ktime_get();
+
+ port->qos.link_speed = SPEED_UNKNOWN;
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-qos.h b/drivers/net/ethernet/ti/am65-cpsw-qos.h
new file mode 100644
index 000000000..fb223b43b
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-qos.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
+ */
+
+#ifndef AM65_CPSW_QOS_H_
+#define AM65_CPSW_QOS_H_
+
+#include <linux/netdevice.h>
+#include <net/pkt_sched.h>
+
+struct am65_cpsw_est {
+ int buf;
+ /* has to be the last one */
+ struct tc_taprio_qopt_offload taprio;
+};
+
+struct am65_cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
+struct am65_cpsw_qos {
+ struct am65_cpsw_est *est_admin;
+ struct am65_cpsw_est *est_oper;
+ ktime_t link_down_time;
+ int link_speed;
+
+ struct am65_cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct am65_cpsw_ale_ratelimit ale_mc_ratelimit;
+};
+
+int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed);
+void am65_cpsw_qos_link_down(struct net_device *ndev);
+
+#endif /* AM65_CPSW_QOS_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.c b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
new file mode 100644
index 000000000..d4c56da98
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.c
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments K3 AM65 Ethernet Switchdev Driver
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "am65-cpsw-nuss.h"
+#include "am65-cpsw-switchdev.h"
+#include "cpsw_ale.h"
+
+struct am65_cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct am65_cpsw_port *port;
+ unsigned long event;
+};
+
+static int am65_cpsw_port_stp_state_set(struct am65_cpsw_port *port, u8 state)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ u8 cpsw_state;
+ int ret = 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, port->port_id,
+ ALE_PORT_STATE, cpsw_state);
+ netdev_dbg(port->ndev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int am65_cpsw_port_attr_br_flags_set(struct am65_cpsw_port *port,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags flags)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
+
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+
+ netdev_dbg(port->ndev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, port->port_id);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(port->port_id),
+ unreg_mcast_add);
+ }
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int am65_cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int ret;
+
+ netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, port->port_id);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_pre_set(ndev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = am65_cpsw_port_stp_state_set(port, attr->u.stp_state);
+ netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = am65_cpsw_port_attr_br_flags_set(port, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static u16 am65_cpsw_get_pvid(struct am65_cpsw_port *port)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
+ if (port->port_id)
+ pvid = readl(port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ pvid = readl(host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void am65_cpsw_set_pvid(struct am65_cpsw_port *port, u16 vid, bool cfi, u32 cos)
+{
+ struct am65_cpsw_common *cpsw = port->common;
+ struct am65_cpsw_host *host_p = am65_common_get_host(cpsw);
+ u32 pvid;
+
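+	/* PORT_VLAN register encoding, as built below: VID in bits [11:0],
+	 * CFI/DEI in bit 12, port CoS (PCP) in bits [15:13]
+	 */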
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (port->port_id)
+ writel(pvid, port->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+ else
+ writel(pvid, host_p->port_base + AM65_CPSW_PORT_VLAN_REG_OFFSET);
+}
+
+static int am65_cpsw_port_vlan_add(struct am65_cpsw_port *port, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = BIT(port->port_id);
+ flags = port->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ netdev_err(port->ndev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN | ALE_SECURE, vid);
+ if (!pvid)
+ return ret;
+
+ am65_cpsw_set_pvid(port, vid, 0, 0);
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlan_del(struct am65_cpsw_port *port, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+	/* We don't care about the return value here; an error is returned
+	 * only if the unicast entry is not present
+	 */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, port->slave.mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == am65_cpsw_get_pvid(port))
+ am65_cpsw_set_pvid(port, 0, 0, 0);
+
+	/* We don't care about the return value here; an error is returned
+	 * only if the multicast entry is not present
+	 */
+ cpsw_ale_del_mcast(cpsw->ale, port->ndev->broadcast, port_mask,
+ ALE_VLAN, vid);
+ netdev_dbg(port->ndev, "VID del: %s: vid:%u ports:%X\n",
+ port->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int am65_cpsw_port_vlans_add(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ netdev_dbg(port->ndev, "VID add: %s: vid:%u flags:%X\n",
+ port->ndev->name, vlan->vid, vlan->flags);
+
+ return am65_cpsw_port_vlan_add(port, untag, pvid, vlan->vid, orig_dev);
+}
+
+static int am65_cpsw_port_vlans_del(struct am65_cpsw_port *port,
+ const struct switchdev_obj_port_vlan *vlan)
+
+{
+ return am65_cpsw_port_vlan_del(port, vlan->vid, vlan->obj.orig_dev);
+}
+
+static int am65_cpsw_port_mdb_add(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_mask;
+ int err;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(port->port_id);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ netdev_dbg(port->ndev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int am65_cpsw_port_mdb_del(struct am65_cpsw_port *port,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct am65_cpsw_common *cpsw = port->common;
+ int del_mask;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(port->port_id);
+
+ /* Ignore error as error code is returned only when entry is already removed */
+ cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ netdev_dbg(port->ndev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ port->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return 0;
+}
+
+static int am65_cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_add(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_add(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int am65_cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ int err = 0;
+
+ netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, port->port_id);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = am65_cpsw_port_vlans_del(port, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = am65_cpsw_port_mdb_del(port, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void am65_cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void am65_cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct am65_cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct am65_cpsw_switchdev_event_work, work);
+ struct am65_cpsw_port *port = switchdev_work->port;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct am65_cpsw_common *cpsw = port->common;
+ int port_id = port->port_id;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user || fdb->is_local)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ am65_cpsw_fdb_offload_notify(port->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ netdev_dbg(port->ndev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port_id);
+
+ if (!fdb->added_by_user || fdb->is_local)
+ break;
+ if (memcmp(port->slave.mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port_id = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port_id,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(port->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int am65_cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct am65_cpsw_switchdev_event_work *switchdev_work;
+ struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!am65_cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, am65_cpsw_switchdev_event_work);
+ switchdev_work->port = port;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = am65_cpsw_switchdev_event,
+};
+
+static int am65_cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ am65_cpsw_port_dev_check,
+ am65_cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = am65_cpsw_switchdev_blocking_event,
+};
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev blocking notifier ret:%d\n",
+ ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/am65-cpsw-switchdev.h b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
new file mode 100644
index 000000000..a67a7606b
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpsw-switchdev.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com/
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_
+
+#include <linux/skbuff.h>
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+ skb->offload_fwd_mark = val;
+}
+
+int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw);
+void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw);
+#else
+static inline int am65_cpsw_switchdev_register_notifiers(struct am65_cpsw_common *cpsw)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void am65_cpsw_switchdev_unregister_notifiers(struct am65_cpsw_common *cpsw)
+{
+}
+
+static inline void am65_cpsw_nuss_set_offload_fwd_mark(struct sk_buff *skb, bool val)
+{
+}
+
+#endif
+
+#endif /* DRIVERS_NET_ETHERNET_TI_AM65_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/am65-cpts.c b/drivers/net/ethernet/ti/am65-cpts.c
new file mode 100644
index 000000000..9948ac14e
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.c
@@ -0,0 +1,1060 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 AM65x Common Platform Time Sync
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/if_vlan.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptp_classify.h>
+#include <linux/ptp_clock_kernel.h>
+
+#include "am65-cpts.h"
+
+struct am65_genf_regs {
+ u32 comp_lo; /* Comparison Low Value 0:31 */
+ u32 comp_hi; /* Comparison High Value 32:63 */
+ u32 control; /* control */
+ u32 length; /* Length */
+ u32 ppm_low; /* PPM Load Low Value 0:31 */
+ u32 ppm_hi; /* PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Nudge value */
+} __aligned(32) __packed;
+
+#define AM65_CPTS_GENF_MAX_NUM 9
+#define AM65_CPTS_ESTF_MAX_NUM 8
+
+struct am65_cpts_regs {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 rftclk_sel; /* Reference Clock Select Register */
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val_lo; /* Time Stamp Load Low Value 0:31 */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 ts_comp_lo; /* Time Stamp Comparison Low Value 0:31 */
+ u32 ts_comp_length; /* Time Stamp Comparison Length */
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 ts_comp_nudge; /* Time Stamp Comparison Nudge Value */
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_0; /* Event Time Stamp lo 0:31 */
+ u32 event_1; /* Event Type Fields */
+ u32 event_2; /* Event Type Fields domain */
+ u32 event_3; /* Event Time Stamp hi 32:63 */
+ u32 ts_load_val_hi; /* Time Stamp Load High Value 32:63 */
+ u32 ts_comp_hi; /* Time Stamp Comparison High Value 32:63 */
+ u32 ts_add_val; /* Time Stamp Add value */
+ u32 ts_ppm_low; /* Time Stamp PPM Load Low Value 0:31 */
+ u32 ts_ppm_hi; /* Time Stamp PPM Load High Value 32:63 */
+ u32 ts_nudge; /* Time Stamp Nudge value */
+ u32 reserv[33];
+ struct am65_genf_regs genf[AM65_CPTS_GENF_MAX_NUM];
+ struct am65_genf_regs estf[AM65_CPTS_ESTF_MAX_NUM];
+};
+
+/* CONTROL_REG */
+#define AM65_CPTS_CONTROL_EN BIT(0)
+#define AM65_CPTS_CONTROL_INT_TEST BIT(1)
+#define AM65_CPTS_CONTROL_TS_COMP_POLARITY BIT(2)
+#define AM65_CPTS_CONTROL_TSTAMP_EN BIT(3)
+#define AM65_CPTS_CONTROL_SEQUENCE_EN BIT(4)
+#define AM65_CPTS_CONTROL_64MODE BIT(5)
+#define AM65_CPTS_CONTROL_TS_COMP_TOG BIT(6)
+#define AM65_CPTS_CONTROL_TS_PPM_DIR BIT(7)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_EN BIT(8)
+#define AM65_CPTS_CONTROL_HW2_TS_PUSH_EN BIT(9)
+#define AM65_CPTS_CONTROL_HW3_TS_PUSH_EN BIT(10)
+#define AM65_CPTS_CONTROL_HW4_TS_PUSH_EN BIT(11)
+#define AM65_CPTS_CONTROL_HW5_TS_PUSH_EN BIT(12)
+#define AM65_CPTS_CONTROL_HW6_TS_PUSH_EN BIT(13)
+#define AM65_CPTS_CONTROL_HW7_TS_PUSH_EN BIT(14)
+#define AM65_CPTS_CONTROL_HW8_TS_PUSH_EN BIT(15)
+#define AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET (8)
+
+#define AM65_CPTS_CONTROL_TX_GENF_CLR_EN BIT(17)
+
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_MASK (0xF)
+#define AM65_CPTS_CONTROL_TS_SYNC_SEL_SHIFT (28)
+
+/* RFTCLK_SEL_REG */
+#define AM65_CPTS_RFTCLK_SEL_MASK (0x1F)
+
+/* TS_PUSH_REG */
+#define AM65_CPTS_TS_PUSH BIT(0)
+
+/* TS_LOAD_EN_REG */
+#define AM65_CPTS_TS_LOAD_EN BIT(0)
+
+/* INTSTAT_RAW_REG */
+#define AM65_CPTS_INTSTAT_RAW_TS_PEND BIT(0)
+
+/* INTSTAT_MASKED_REG */
+#define AM65_CPTS_INTSTAT_MASKED_TS_PEND BIT(0)
+
+/* INT_ENABLE_REG */
+#define AM65_CPTS_INT_ENABLE_TS_PEND_EN BIT(0)
+
+/* TS_COMP_NUDGE_REG */
+#define AM65_CPTS_TS_COMP_NUDGE_MASK (0xFF)
+
+/* EVENT_POP_REG */
+#define AM65_CPTS_EVENT_POP BIT(0)
+
+/* EVENT_1_REG */
+#define AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK GENMASK(15, 0)
+
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK GENMASK(19, 16)
+#define AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT (16)
+
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_MASK GENMASK(23, 20)
+#define AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT (20)
+
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_MASK GENMASK(28, 24)
+#define AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT (24)
+
+/* EVENT_2_REG */
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_MASK (0xFF)
+#define AM65_CPTS_EVENT_2_REG_DOMAIN_SHIFT (0)
+
+enum {
+ AM65_CPTS_EV_PUSH, /* Time Stamp Push Event */
+ AM65_CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ AM65_CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ AM65_CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ AM65_CPTS_EV_RX, /* Ethernet Receive Event */
+ AM65_CPTS_EV_TX, /* Ethernet Transmit Event */
+ AM65_CPTS_EV_TS_COMP, /* Time Stamp Compare Event */
+ AM65_CPTS_EV_HOST, /* Host Transmit Event */
+};
+
+struct am65_cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 event1;
+ u32 event2;
+ u64 timestamp;
+};
+
+#define AM65_CPTS_FIFO_DEPTH (16)
+#define AM65_CPTS_MAX_EVENTS (32)
+#define AM65_CPTS_EVENT_RX_TX_TIMEOUT (20) /* ms */
+#define AM65_CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define AM65_CPTS_MIN_PPM 0x400
+
+struct am65_cpts {
+ struct device *dev;
+ struct am65_cpts_regs __iomem *reg;
+ struct ptp_clock_info ptp_info;
+ struct ptp_clock *ptp_clock;
+ int phc_index;
+ struct clk_hw *clk_mux_hw;
+ struct device_node *clk_mux_np;
+ struct clk *refclk;
+ u32 refclk_freq;
+ struct list_head events;
+ struct list_head pool;
+ struct am65_cpts_event pool_data[AM65_CPTS_MAX_EVENTS];
+	spinlock_t lock; /* protects the event lists */
+ u32 ext_ts_inputs;
+ u32 genf_num;
+ u32 ts_add_val;
+ int irq;
+ struct mutex ptp_clk_lock; /* PHC access sync */
+ u64 timestamp;
+ u32 genf_enable;
+ u32 hw_ts_enable;
+ struct sk_buff_head txq;
+};
+
+struct am65_cpts_skb_cb_data {
+ unsigned long tmo;
+ u32 skb_mtype_seqid;
+};
+
+#define am65_cpts_write32(c, v, r) writel(v, &(c)->reg->r)
+#define am65_cpts_read32(c, r) readl(&(c)->reg->r)
+
+static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
+{
+ u32 val;
+
+ val = upper_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_hi);
+ val = lower_32_bits(start_tstamp);
+ am65_cpts_write32(cpts, val, ts_load_val_lo);
+
+ am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
+}
+
+static void am65_cpts_set_add_val(struct am65_cpts *cpts)
+{
+ /* select coefficient according to the rate */
+ cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
+
+ am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
+}
+
+static void am65_cpts_disable(struct am65_cpts *cpts)
+{
+ am65_cpts_write32(cpts, 0, control);
+ am65_cpts_write32(cpts, 0, int_enable);
+}
+
+static int am65_cpts_event_get_port(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_PORT_NUMBER_MASK) >>
+ AM65_CPTS_EVENT_1_PORT_NUMBER_SHIFT;
+}
+
+static int am65_cpts_event_get_type(struct am65_cpts_event *event)
+{
+ return (event->event1 & AM65_CPTS_EVENT_1_EVENT_TYPE_MASK) >>
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT;
+}
+
+static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct am65_cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
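+/* Pop one event from the CPTS event FIFO into @event. Returns false when an
+ * event was read, true when the FIFO is empty (note the inverted sense; the
+ * caller breaks out of its loop on true).
+ */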
+static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ u32 r = am65_cpts_read32(cpts, intstat_raw);
+
+ if (r & AM65_CPTS_INTSTAT_RAW_TS_PEND) {
+ event->timestamp = am65_cpts_read32(cpts, event_0);
+ event->event1 = am65_cpts_read32(cpts, event_1);
+ event->event2 = am65_cpts_read32(cpts, event_2);
+ event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
+ am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
+ return false;
+ }
+ return true;
+}
+
+static int am65_cpts_fifo_read(struct am65_cpts *cpts)
+{
+ struct ptp_clock_event pevent;
+ struct am65_cpts_event *event;
+ bool schedule = false;
+ int i, type, ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ for (i = 0; i < AM65_CPTS_FIFO_DEPTH; i++) {
+ event = list_first_entry_or_null(&cpts->pool,
+ struct am65_cpts_event, list);
+
+ if (!event) {
+ if (am65_cpts_cpts_purge_events(cpts)) {
+ dev_err(cpts->dev, "cpts: event pool empty\n");
+ ret = -1;
+ goto out;
+ }
+ continue;
+ }
+
+ if (am65_cpts_fifo_pop_event(cpts, event))
+ break;
+
+ type = am65_cpts_event_get_type(event);
+ switch (type) {
+ case AM65_CPTS_EV_PUSH:
+ cpts->timestamp = event->timestamp;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
+ cpts->timestamp);
+ break;
+ case AM65_CPTS_EV_RX:
+ case AM65_CPTS_EV_TX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(AM65_CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EV_TX e1:%08x e2:%08x t:%lld\n",
+ event->event1, event->event2,
+ event->timestamp);
+ schedule = true;
+ break;
+ case AM65_CPTS_EV_HW:
+ pevent.index = am65_cpts_event_get_port(event) - 1;
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
+ pevent.index, event->timestamp);
+
+ ptp_clock_event(cpts->ptp_clock, &pevent);
+ break;
+ case AM65_CPTS_EV_HOST:
+ break;
+ case AM65_CPTS_EV_ROLL:
+ case AM65_CPTS_EV_HALF:
+ case AM65_CPTS_EV_TS_COMP:
+ dev_dbg(cpts->dev,
+ "AM65_CPTS_EVT: %d e1:%08x e2:%08x t:%lld\n",
+ type,
+ event->event1, event->event2,
+ event->timestamp);
+ break;
+ default:
+ dev_err(cpts->dev, "cpts: unknown event type\n");
+ ret = -1;
+ goto out;
+ }
+ }
+
+out:
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (schedule)
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+
+ return ret;
+}
+
+static u64 am65_cpts_gettime(struct am65_cpts *cpts,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+ u64 val = 0;
+
+	/* temporarily disable the cpts interrupt to avoid an unintended
+	 * double read. An interrupt can be in flight - that's OK.
+	 */
+ am65_cpts_write32(cpts, 0, int_enable);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
+ am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
+ am65_cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ am65_cpts_fifo_read(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ val = cpts->timestamp;
+
+ return val;
+}
+
+static irqreturn_t am65_cpts_interrupt(int irq, void *dev_id)
+{
+ struct am65_cpts *cpts = dev_id;
+
+ if (am65_cpts_fifo_read(cpts))
+ dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
+
+ return IRQ_HANDLED;
+}
+
+/* PTP clock operations */
+static int am65_cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ int neg_adj = 0;
+ u64 adj_period;
+ u32 val;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+
+ /* base freq = 1GHz = 1 000 000 000
+ * ppb_norm = ppb * base_freq / clock_freq;
+ * ppm_norm = ppb_norm / 1000
+ * adj_period = 1 000 000 / ppm_norm
+ * adj_period = 1 000 000 000 / ppb_norm
+ * adj_period = 1 000 000 000 / (ppb * base_freq / clock_freq)
+ * adj_period = (1 000 000 000 * clock_freq) / (ppb * base_freq)
+ * adj_period = clock_freq / ppb
+ */
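+	/* e.g. with refclk_freq = 200000000 and ppb = 100 this gives
+	 * adj_period = 2000000 refclk cycles, i.e. one adjustment period
+	 * about every 10 ms at 200 MHz
+	 */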
+ adj_period = div_u64(cpts->refclk_freq, ppb);
+
+ mutex_lock(&cpts->ptp_clk_lock);
+
+ val = am65_cpts_read32(cpts, control);
+ if (neg_adj)
+ val |= AM65_CPTS_CONTROL_TS_PPM_DIR;
+ else
+ val &= ~AM65_CPTS_CONTROL_TS_PPM_DIR;
+ am65_cpts_write32(cpts, val, control);
+
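+	/* the adjustment period is split into a 10-bit high part and a 32-bit low part */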
+ val = upper_32_bits(adj_period) & 0x3FF;
+ am65_cpts_write32(cpts, val, ts_ppm_hi);
+ val = lower_32_bits(adj_period);
+ am65_cpts_write32(cpts, val, ts_ppm_low);
+
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ s64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ ns += delta;
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static int am65_cpts_ptp_gettimex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, sts);
+ mutex_unlock(&cpts->ptp_clk_lock);
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ u64 ns;
+
+	/* reuse ptp_clk_lock as it serializes ts push */
+ mutex_lock(&cpts->ptp_clk_lock);
+ ns = am65_cpts_gettime(cpts, NULL);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return ns;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_ns_gettime);
+
+static int am65_cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_settime(cpts, ns);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ return 0;
+}
+
+static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ v = am65_cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(AM65_CPTS_CONTROL_HW1_TS_PUSH_OFFSET + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ am65_cpts_write32(cpts, v, control);
+}
+
+static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
+{
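+	/* nothing to do if the input is already in the requested state */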
+ if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_extts_enable_hw(cpts, index, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
+ __func__, index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ u64 cycles;
+ u32 val;
+
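+	/* convert the requested period from ns to CPTS reference clock cycles */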
+ cycles = cfg->ns_period * cpts->refclk_freq;
+ cycles = DIV_ROUND_UP(cycles, NSEC_PER_SEC);
+ if (cycles > U32_MAX)
+ return -EINVAL;
+
+	/* according to the TRM, the length register should be zeroed first */
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ val = upper_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_hi);
+ val = lower_32_bits(cfg->ns_start);
+ am65_cpts_write32(cpts, val, estf[idx].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_enable);
+
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+ am65_cpts_write32(cpts, 0, estf[idx].length);
+
+ dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_estf_disable);
+
+static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
+ u64 ns_period, ns_start, cycles;
+ struct timespec64 ts;
+ u32 val;
+
+ if (on) {
+ ts.tv_sec = req->period.sec;
+ ts.tv_nsec = req->period.nsec;
+ ns_period = timespec64_to_ns(&ts);
+
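+		/* GENF period is programmed in refclk cycles, not nanoseconds */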
+ cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
+
+ ts.tv_sec = req->start.sec;
+ ts.tv_nsec = req->start.nsec;
+ ns_start = timespec64_to_ns(&ts);
+
+ val = upper_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
+ val = lower_32_bits(ns_start);
+ am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
+ val = lower_32_bits(cycles);
+ am65_cpts_write32(cpts, val, genf[req->index].length);
+
+ cpts->genf_enable |= BIT(req->index);
+ } else {
+ am65_cpts_write32(cpts, 0, genf[req->index].length);
+
+ cpts->genf_enable &= ~BIT(req->index);
+ }
+}
+
+static int am65_cpts_perout_enable(struct am65_cpts *cpts,
+ struct ptp_perout_request *req, int on)
+{
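+	/* nothing to do if the generator is already in the requested state */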
+ if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ am65_cpts_perout_enable_hw(cpts, req, on);
+ mutex_unlock(&cpts->ptp_clk_lock);
+
+ dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
+ __func__, req->index, on ? "enabled" : "disabled");
+
+ return 0;
+}
+
+static int am65_cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return am65_cpts_extts_enable(cpts, rq->extts.index, on);
+ case PTP_CLK_REQ_PEROUT:
+ return am65_cpts_perout_enable(cpts, &rq->perout, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp);
+
+static struct ptp_clock_info am65_ptp_info = {
+ .owner = THIS_MODULE,
+ .name = "CTPS timer",
+ .adjfreq = am65_cpts_ptp_adjfreq,
+ .adjtime = am65_cpts_ptp_adjtime,
+ .gettimex64 = am65_cpts_ptp_gettimex,
+ .settime64 = am65_cpts_ptp_settime,
+ .enable = am65_cpts_ptp_enable,
+ .do_aux_work = am65_cpts_ts_work,
+};
+
+static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
+ struct am65_cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->event1 &
+ (AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK |
+ AM65_CPTS_EVENT_1_EVENT_TYPE_MASK |
+ AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+	/* txq_list is local after the splice above, so no locking is needed here */
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct am65_cpts_skb_cb_data *skb_cb =
+ (struct am65_cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ u64 ns = event->timestamp;
+
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(ns);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev,
+ "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+			/* drop skbs that have exceeded the 100 ms timeout */
+ dev_dbg(cpts->dev,
+ "expiring tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void am65_cpts_find_ts(struct am65_cpts *cpts)
+{
+ struct am65_cpts_event *event;
+ struct list_head *this, *next;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
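+	/* detach the pending event list so matching can run without cpts->lock held */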
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct am65_cpts_event, list);
+ if (am65_cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
+static long am65_cpts_ts_work(struct ptp_clock_info *ptp)
+{
+ struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
+ unsigned long flags;
+ long delay = -1;
+
+ am65_cpts_find_ts(cpts);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = AM65_CPTS_SKB_TX_WORK_TIMEOUT;
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return delay;
+}
+
+/**
+ * am65_cpts_rx_enable - enable rx timestamping
+ * @cpts: cpts handle
+ * @en: enable
+ *
+ * This function enables RX packet timestamping. The CPTS can timestamp all
+ * RX packets.
+ */
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+ u32 val;
+
+ mutex_lock(&cpts->ptp_clk_lock);
+ val = am65_cpts_read32(cpts, control);
+ if (en)
+ val |= AM65_CPTS_CONTROL_TSTAMP_EN;
+ else
+ val &= ~AM65_CPTS_CONTROL_TSTAMP_EN;
+ am65_cpts_write32(cpts, val, control);
+ mutex_unlock(&cpts->ptp_clk_lock);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_rx_enable);
+
+static int am65_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return 0;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
+
+ *mtype_seqid = (msgtype << AM65_CPTS_EVENT_1_MESSAGE_TYPE_SHIFT) &
+ AM65_CPTS_EVENT_1_MESSAGE_TYPE_MASK;
+ *mtype_seqid |= (seqid & AM65_CPTS_EVENT_1_SEQUENCE_ID_MASK);
+
+ return 1;
+}
+
+/**
+ * am65_cpts_tx_timestamp - save tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function saves a TX packet for timestamping if the packet can be
+ * timestamped. Further processing is done from the PTP auxiliary worker.
+ */
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+
+	/* queue the frame for later processing;
+	 * the PTP auxiliary worker will match it against CPTS TX events.
+	 */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(100);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->ptp_clock, 0);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_tx_timestamp);
+
+/**
+ * am65_cpts_prep_tx_timestamp - check and prepare tx packet for timestamping
+ * @cpts: cpts handle
+ * @skb: packet
+ *
+ * This function should be called from the driver's .ndo_start_xmit() path.
+ * It checks whether the packet can be timestamped, fills the internal cpts
+ * data in skb->cb and marks the packet as SKBTX_IN_PROGRESS.
+ */
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
+{
+ struct am65_cpts_skb_cb_data *skb_cb = (void *)skb->cb;
+ int ret;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
+ return;
+
+ ret = am65_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
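+	/* tag the lookup key with the TX event type so it matches CPTS TX events */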
+ skb_cb->skb_mtype_seqid |= (AM65_CPTS_EV_TX <<
+ AM65_CPTS_EVENT_1_EVENT_TYPE_SHIFT);
+
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_prep_tx_timestamp);
+
+int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return cpts->phc_index;
+}
+EXPORT_SYMBOL_GPL(am65_cpts_phc_index);
+
+static void cpts_free_clk_mux(void *data)
+{
+ struct am65_cpts *cpts = data;
+
+ of_clk_del_provider(cpts->clk_mux_np);
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+ of_node_put(cpts->clk_mux_np);
+}
+
+static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
+ struct device_node *node)
+{
+ unsigned int num_parents;
+ const char **parent_names;
+ char *clk_mux_name;
+ void __iomem *reg;
+ int ret = -EINVAL;
+
+ cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
+ if (!cpts->clk_mux_np)
+ return 0;
+
+ num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
+ cpts->clk_mux_np);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
+ GFP_KERNEL);
+ if (!parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
+
+ clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
+ dev_name(cpts->dev), cpts->clk_mux_np);
+ if (!clk_mux_name) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ reg = &cpts->reg->rftclk_sel;
+ /* dev must be NULL to avoid recursive incrementing
+ * of module refcnt
+ */
+ cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
+ parent_names, num_parents,
+ 0, reg, 0, 5, 0, NULL);
+ if (IS_ERR(cpts->clk_mux_hw)) {
+ ret = PTR_ERR(cpts->clk_mux_hw);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
+ cpts->clk_mux_hw);
+ if (ret)
+ goto clk_hw_register;
+
+ ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
+ if (ret)
+ dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
+
+ return ret;
+
+clk_hw_register:
+ clk_hw_unregister_mux(cpts->clk_mux_hw);
+mux_fail:
+ of_node_put(cpts->clk_mux_np);
+ return ret;
+}
+
+static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
+{
+ u32 prop[2];
+
+ if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
+ cpts->ext_ts_inputs = prop[0];
+
+ if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
+ cpts->genf_num = prop[0];
+
+ return cpts_of_mux_clk_setup(cpts, node);
+}
+
+void am65_cpts_release(struct am65_cpts *cpts)
+{
+ ptp_clock_unregister(cpts->ptp_clock);
+ am65_cpts_disable(cpts);
+ clk_disable_unprepare(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_release);
+
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node)
+{
+ struct am65_cpts *cpts;
+ int ret, i;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct am65_cpts_regs __iomem *)regs;
+
+ cpts->irq = of_irq_get_byname(node, "cpts");
+ if (cpts->irq <= 0) {
+ ret = cpts->irq ?: -ENXIO;
+ dev_err_probe(dev, ret, "Failed to get IRQ number\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = am65_cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ mutex_init(&cpts->ptp_clk_lock);
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ spin_lock_init(&cpts->lock);
+ skb_queue_head_init(&cpts->txq);
+
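+	/* pre-populate the free event pool */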
+ for (i = 0; i < AM65_CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk)) {
+ ret = PTR_ERR(cpts->refclk);
+ dev_err_probe(dev, ret, "Failed to get refclk\n");
+ return ERR_PTR(ret);
+ }
+
+ ret = clk_prepare_enable(cpts->refclk);
+ if (ret) {
+ dev_err(dev, "Failed to enable refclk %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ cpts->refclk_freq = clk_get_rate(cpts->refclk);
+
+ am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
+ cpts->ptp_info = am65_ptp_info;
+
+ if (cpts->ext_ts_inputs)
+ cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
+ if (cpts->genf_num)
+ cpts->ptp_info.n_per_out = cpts->genf_num;
+
+ am65_cpts_set_add_val(cpts);
+
+ am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
+ AM65_CPTS_CONTROL_64MODE |
+ AM65_CPTS_CONTROL_TX_GENF_CLR_EN,
+ control);
+ am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
+
+ /* set time to the current system time */
+ am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
+
+ cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
+ if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
+ dev_err(dev, "Failed to register ptp clk %ld\n",
+ PTR_ERR(cpts->ptp_clock));
+ ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
+ goto refclk_disable;
+ }
+ cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
+
+ ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
+ am65_cpts_interrupt,
+ IRQF_ONESHOT, dev_name(dev), cpts);
+ if (ret < 0) {
+ dev_err(cpts->dev, "error attaching irq %d\n", ret);
+ goto reset_ptpclk;
+ }
+
+ dev_info(dev, "CPTS ver 0x%08x, freq:%u, add_val:%u\n",
+ am65_cpts_read32(cpts, idver),
+ cpts->refclk_freq, cpts->ts_add_val);
+
+ return cpts;
+
+reset_ptpclk:
+ am65_cpts_release(cpts);
+refclk_disable:
+ clk_disable_unprepare(cpts->refclk);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(am65_cpts_create);
+
+static int am65_cpts_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device *dev = &pdev->dev;
+ struct am65_cpts *cpts;
+ void __iomem *base;
+
+ base = devm_platform_ioremap_resource_byname(pdev, "cpts");
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ cpts = am65_cpts_create(dev, base, node);
+ return PTR_ERR_OR_ZERO(cpts);
+}
+
+static const struct of_device_id am65_cpts_of_match[] = {
+ { .compatible = "ti,am65-cpts", },
+ { .compatible = "ti,j721e-cpts", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, am65_cpts_of_match);
+
+static struct platform_driver am65_cpts_driver = {
+ .probe = am65_cpts_probe,
+ .driver = {
+ .name = "am65-cpts",
+ .of_match_table = am65_cpts_of_match,
+ },
+};
+module_platform_driver(am65_cpts_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Grygorii Strashko <grygorii.strashko@ti.com>");
+MODULE_DESCRIPTION("TI K3 AM65 CPTS driver");
diff --git a/drivers/net/ethernet/ti/am65-cpts.h b/drivers/net/ethernet/ti/am65-cpts.h
new file mode 100644
index 000000000..c0ae0117e
--- /dev/null
+++ b/drivers/net/ethernet/ti/am65-cpts.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/* TI K3 AM65 CPTS driver interface
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPTS_H_
+#define K3_CPTS_H_
+
+#include <linux/device.h>
+#include <linux/of.h>
+
+struct am65_cpts;
+
+struct am65_cpts_estf_cfg {
+ u64 ns_period;
+ u64 ns_start;
+};
+
+#if IS_ENABLED(CONFIG_TI_K3_AM65_CPTS)
+void am65_cpts_release(struct am65_cpts *cpts);
+struct am65_cpts *am65_cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node);
+int am65_cpts_phc_index(struct am65_cpts *cpts);
+void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb);
+void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en);
+u64 am65_cpts_ns_gettime(struct am65_cpts *cpts);
+int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg);
+void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx);
+#else
+static inline void am65_cpts_release(struct am65_cpts *cpts)
+{
+}
+
+static inline struct am65_cpts *am65_cpts_create(struct device *dev,
+ void __iomem *regs,
+ struct device_node *node)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline int am65_cpts_phc_index(struct am65_cpts *cpts)
+{
+ return -1;
+}
+
+static inline void am65_cpts_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts,
+ struct sk_buff *skb)
+{
+}
+
+static inline void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
+{
+}
+
+static inline s64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
+{
+ return 0;
+}
+
+static inline int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
+ struct am65_cpts_estf_cfg *cfg)
+{
+ return 0;
+}
+
+static inline void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
+{
+}
+#endif
+
+#endif /* K3_CPTS_H_ */
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
new file mode 100644
index 000000000..80eeeb463
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -0,0 +1,1251 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2006, 2007 Eugene Konev
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/moduleparam.h>
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+
+#include <linux/netdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <linux/mii.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/atomic.h>
+
+#include <asm/mach-ar7/ar7.h>
+
+MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
+MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:cpmac");
+
+static int debug_level = 8;
+static int dumb_switch;
+
+/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
+module_param(debug_level, int, 0444);
+module_param(dumb_switch, int, 0444);
+
+MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
+MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
+
+#define CPMAC_VERSION "0.5.2"
+/* frame size + 802.1q tag + FCS size */
+#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define CPMAC_QUEUES 8
+
+/* Ethernet registers */
+#define CPMAC_TX_CONTROL 0x0004
+#define CPMAC_TX_TEARDOWN 0x0008
+#define CPMAC_RX_CONTROL 0x0014
+#define CPMAC_RX_TEARDOWN 0x0018
+#define CPMAC_MBP 0x0100
+#define MBP_RXPASSCRC 0x40000000
+#define MBP_RXQOS 0x20000000
+#define MBP_RXNOCHAIN 0x10000000
+#define MBP_RXCMF 0x01000000
+#define MBP_RXSHORT 0x00800000
+#define MBP_RXCEF 0x00400000
+#define MBP_RXPROMISC 0x00200000
+#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
+#define MBP_RXBCAST 0x00002000
+#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
+#define MBP_RXMCAST 0x00000020
+#define MBP_MCASTCHAN(channel) ((channel) & 0x7)
+#define CPMAC_UNICAST_ENABLE 0x0104
+#define CPMAC_UNICAST_CLEAR 0x0108
+#define CPMAC_MAX_LENGTH 0x010c
+#define CPMAC_BUFFER_OFFSET 0x0110
+#define CPMAC_MAC_CONTROL 0x0160
+#define MAC_TXPTYPE 0x00000200
+#define MAC_TXPACE 0x00000040
+#define MAC_MII 0x00000020
+#define MAC_TXFLOW 0x00000010
+#define MAC_RXFLOW 0x00000008
+#define MAC_MTEST 0x00000004
+#define MAC_LOOPBACK 0x00000002
+#define MAC_FDX 0x00000001
+#define CPMAC_MAC_STATUS 0x0164
+#define MAC_STATUS_QOS 0x00000004
+#define MAC_STATUS_RXFLOW 0x00000002
+#define MAC_STATUS_TXFLOW 0x00000001
+#define CPMAC_TX_INT_ENABLE 0x0178
+#define CPMAC_TX_INT_CLEAR 0x017c
+#define CPMAC_MAC_INT_VECTOR 0x0180
+#define MAC_INT_STATUS 0x00080000
+#define MAC_INT_HOST 0x00040000
+#define MAC_INT_RX 0x00020000
+#define MAC_INT_TX 0x00010000
+#define CPMAC_MAC_EOI_VECTOR 0x0184
+#define CPMAC_RX_INT_ENABLE 0x0198
+#define CPMAC_RX_INT_CLEAR 0x019c
+#define CPMAC_MAC_INT_ENABLE 0x01a8
+#define CPMAC_MAC_INT_CLEAR 0x01ac
+#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
+#define CPMAC_MAC_ADDR_MID 0x01d0
+#define CPMAC_MAC_ADDR_HI 0x01d4
+#define CPMAC_MAC_HASH_LO 0x01d8
+#define CPMAC_MAC_HASH_HI 0x01dc
+#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
+#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
+#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
+#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
+#define CPMAC_REG_END 0x0680
+
+/* Rx/Tx statistics
+ * TODO: use some of them to fill stats in cpmac_stats()
+ */
+#define CPMAC_STATS_RX_GOOD 0x0200
+#define CPMAC_STATS_RX_BCAST 0x0204
+#define CPMAC_STATS_RX_MCAST 0x0208
+#define CPMAC_STATS_RX_PAUSE 0x020c
+#define CPMAC_STATS_RX_CRC 0x0210
+#define CPMAC_STATS_RX_ALIGN 0x0214
+#define CPMAC_STATS_RX_OVER 0x0218
+#define CPMAC_STATS_RX_JABBER 0x021c
+#define CPMAC_STATS_RX_UNDER 0x0220
+#define CPMAC_STATS_RX_FRAG 0x0224
+#define CPMAC_STATS_RX_FILTER 0x0228
+#define CPMAC_STATS_RX_QOSFILTER 0x022c
+#define CPMAC_STATS_RX_OCTETS 0x0230
+
+#define CPMAC_STATS_TX_GOOD 0x0234
+#define CPMAC_STATS_TX_BCAST 0x0238
+#define CPMAC_STATS_TX_MCAST 0x023c
+#define CPMAC_STATS_TX_PAUSE 0x0240
+#define CPMAC_STATS_TX_DEFER 0x0244
+#define CPMAC_STATS_TX_COLLISION 0x0248
+#define CPMAC_STATS_TX_SINGLECOLL 0x024c
+#define CPMAC_STATS_TX_MULTICOLL 0x0250
+#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
+#define CPMAC_STATS_TX_LATECOLL 0x0258
+#define CPMAC_STATS_TX_UNDERRUN 0x025c
+#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
+#define CPMAC_STATS_TX_OCTETS 0x0264
+
+#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
+#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
+ (reg)))
+
+/* MDIO bus */
+#define CPMAC_MDIO_VERSION 0x0000
+#define CPMAC_MDIO_CONTROL 0x0004
+#define MDIOC_IDLE 0x80000000
+#define MDIOC_ENABLE 0x40000000
+#define MDIOC_PREAMBLE 0x00100000
+#define MDIOC_FAULT 0x00080000
+#define MDIOC_FAULTDETECT 0x00040000
+#define MDIOC_INTTEST 0x00020000
+#define MDIOC_CLKDIV(div) ((div) & 0xff)
+#define CPMAC_MDIO_ALIVE 0x0008
+#define CPMAC_MDIO_LINK 0x000c
+#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
+#define MDIO_BUSY 0x80000000
+#define MDIO_WRITE 0x40000000
+#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
+#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
+#define MDIO_DATA(data) ((data) & 0xffff)
+#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
+#define PHYSEL_LINKSEL 0x00000040
+#define PHYSEL_LINKINT 0x00000020
+
+struct cpmac_desc {
+ u32 hw_next;
+ u32 hw_data;
+ u16 buflen;
+ u16 bufflags;
+ u16 datalen;
+ u16 dataflags;
+#define CPMAC_SOP 0x8000
+#define CPMAC_EOP 0x4000
+#define CPMAC_OWN 0x2000
+#define CPMAC_EOQ 0x1000
+ struct sk_buff *skb;
+ struct cpmac_desc *next;
+ struct cpmac_desc *prev;
+ dma_addr_t mapping;
+ dma_addr_t data_mapping;
+};
+
+struct cpmac_priv {
+ spinlock_t lock;
+ spinlock_t rx_lock;
+ struct cpmac_desc *rx_head;
+ int ring_size;
+ struct cpmac_desc *desc_ring;
+ dma_addr_t dma_ring;
+ void __iomem *regs;
+ struct mii_bus *mii_bus;
+ char phy_name[MII_BUS_ID_SIZE + 3];
+ int oldlink, oldspeed, oldduplex;
+ u32 msg_enable;
+ struct net_device *dev;
+ struct work_struct reset_work;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ atomic_t reset_pending;
+};
+
+static irqreturn_t cpmac_irq(int, void *);
+static void cpmac_hw_start(struct net_device *dev);
+static void cpmac_hw_stop(struct net_device *dev);
+static int cpmac_stop(struct net_device *dev);
+static int cpmac_open(struct net_device *dev);
+
+static void cpmac_dump_regs(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ for (i = 0; i < CPMAC_REG_END; i += 4) {
+ if (i % 16 == 0) {
+ if (i)
+ printk("\n");
+ printk("%s: reg[%p]:", dev->name, priv->regs + i);
+ }
+ printk(" %08x", cpmac_read(priv->regs, i));
+ }
+ printk("\n");
+}
+
+static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
+{
+ int i;
+
+ printk("%s: desc[%p]:", dev->name, desc);
+ for (i = 0; i < sizeof(*desc) / 4; i++)
+ printk(" %08x", ((u32 *)desc)[i]);
+ printk("\n");
+}
+
+static void cpmac_dump_all_desc(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *dump = priv->rx_head;
+
+ do {
+ cpmac_dump_desc(dev, dump);
+ dump = dump->next;
+ } while (dump != priv->rx_head);
+}
+
+static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
+{
+ int i;
+
+ printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+ for (i = 0; i < skb->len; i++) {
+ if (i % 16 == 0) {
+ if (i)
+ printk("\n");
+ printk("%s: data[%p]:", dev->name, skb->data + i);
+ }
+ printk(" %02x", ((u8 *)skb->data)[i]);
+ }
+ printk("\n");
+}
+
+static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
+{
+ u32 val;
+
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
+ MDIO_PHY(phy_id));
+ while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
+ cpu_relax();
+
+ return MDIO_DATA(val);
+}
+
+static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
+ int reg, u16 val)
+{
+ while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
+ cpu_relax();
+ cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
+ MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+
+ return 0;
+}
+
+static int cpmac_mdio_reset(struct mii_bus *bus)
+{
+ struct clk *cpmac_clk;
+
+ cpmac_clk = clk_get(&bus->dev, "cpmac");
+ if (IS_ERR(cpmac_clk)) {
+ pr_err("unable to get cpmac clock\n");
+ return -1;
+ }
+ ar7_device_reset(AR7_RESET_BIT_MDIO);
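+	/* enable MDIO with a clock divider that targets roughly 2.2 MHz */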
+ cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
+ MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
+
+ return 0;
+}
+
+static struct mii_bus *cpmac_mii;
+
+static void cpmac_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u8 tmp;
+ u32 mbp, bit, hash[2] = { 0, };
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ mbp = cpmac_read(priv->regs, CPMAC_MBP);
+ if (dev->flags & IFF_PROMISC) {
+ cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
+ MBP_RXPROMISC);
+ } else {
+ cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
+ if (dev->flags & IFF_ALLMULTI) {
+ /* enable all multicast mode */
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
+ } else {
+ /* cpmac uses some strange mac address hashing
+ * (not crc32)
+ */
+ netdev_for_each_mc_addr(ha, dev) {
+ bit = 0;
+ tmp = ha->addr[0];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = ha->addr[1];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = ha->addr[2];
+ bit ^= (tmp >> 6) ^ tmp;
+ tmp = ha->addr[3];
+ bit ^= (tmp >> 2) ^ (tmp << 4);
+ tmp = ha->addr[4];
+ bit ^= (tmp >> 4) ^ (tmp << 2);
+ tmp = ha->addr[5];
+ bit ^= (tmp >> 6) ^ tmp;
+ bit &= 0x3f;
+ hash[bit / 32] |= 1 << (bit % 32);
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
+ cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
+ }
+ }
+}
+
+static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
+ struct cpmac_desc *desc)
+{
+ struct sk_buff *skb, *result = NULL;
+
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(priv->dev, desc);
+ cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
+ if (unlikely(!desc->datalen)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_warn(priv->dev, "rx: spurious interrupt\n");
+
+ return NULL;
+ }
+
+ skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
+ if (likely(skb)) {
+ skb_put(desc->skb, desc->datalen);
+ desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
+ skb_checksum_none_assert(desc->skb);
+ priv->dev->stats.rx_packets++;
+ priv->dev->stats.rx_bytes += desc->datalen;
+ result = desc->skb;
+ dma_unmap_single(&priv->dev->dev, desc->data_mapping,
+ CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ if (unlikely(netif_msg_pktdata(priv))) {
+ netdev_dbg(priv->dev, "received packet:\n");
+ cpmac_dump_skb(priv->dev, result);
+ }
+ } else {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_warn(priv->dev,
+ "low on skbs, dropping packet\n");
+
+ priv->dev->stats.rx_dropped++;
+ }
+
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+
+ return result;
+}
+
+static int cpmac_poll(struct napi_struct *napi, int budget)
+{
+ struct sk_buff *skb;
+ struct cpmac_desc *desc, *restart;
+ struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
+ int received = 0, processed = 0;
+
+ spin_lock(&priv->rx_lock);
+ if (unlikely(!priv->rx_head)) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_warn(priv->dev, "rx: polling, but no queue\n");
+
+ spin_unlock(&priv->rx_lock);
+ napi_complete(napi);
+ return 0;
+ }
+
+ desc = priv->rx_head;
+ restart = NULL;
+ while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
+ processed++;
+
+ if ((desc->dataflags & CPMAC_EOQ) != 0) {
+ /* The last update to eoq->hw_next didn't happen
+ * soon enough, and the receiver stopped here.
+ * Remember this descriptor so we can restart
+ * the receiver after freeing some space.
+ */
+ if (unlikely(restart)) {
+ if (netif_msg_rx_err(priv))
+ netdev_err(priv->dev, "poll found a"
+ " duplicate EOQ: %p and %p\n",
+ restart, desc);
+ goto fatal_error;
+ }
+
+ restart = desc->next;
+ }
+
+ skb = cpmac_rx_one(priv, desc);
+ if (likely(skb)) {
+ netif_receive_skb(skb);
+ received++;
+ }
+ desc = desc->next;
+ }
+
+ if (desc != priv->rx_head) {
+ /* We freed some buffers, but not the whole ring,
+ * add what we did free to the rx list
+ */
+ desc->prev->hw_next = (u32)0;
+ priv->rx_head->prev->hw_next = priv->rx_head->mapping;
+ }
+
+ /* Optimization: If we did not actually process an EOQ (perhaps because
+ * of quota limits), check to see if the tail of the queue has EOQ set.
+ * We should immediately restart in that case so that the receiver can
+ * restart and run in parallel with more packet processing.
+ * This lets us handle slightly larger bursts before running
+ * out of ring space (assuming dev->weight < ring_size)
+ */
+
+ if (!restart &&
+ (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
+ == CPMAC_EOQ &&
+ (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
+ /* reset EOQ so the poll loop (above) doesn't try to
+ * restart this when it eventually gets to this descriptor.
+ */
+ priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
+ restart = priv->rx_head;
+ }
+
+ if (restart) {
+ priv->dev->stats.rx_errors++;
+ priv->dev->stats.rx_fifo_errors++;
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_warn(priv->dev, "rx dma ring overrun\n");
+
+ if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
+ if (netif_msg_drv(priv))
+ netdev_err(priv->dev, "cpmac_poll is trying "
+ "to restart rx from a descriptor "
+ "that's not free: %p\n", restart);
+ goto fatal_error;
+ }
+
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
+ }
+
+ priv->rx_head = desc;
+ spin_unlock(&priv->rx_lock);
+ if (unlikely(netif_msg_rx_status(priv)))
+ netdev_dbg(priv->dev, "poll processed %d packets\n", received);
+
+ if (processed == 0) {
+ /* we ran out of packets to read,
+ * revert to interrupt-driven mode
+ */
+ napi_complete(napi);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ return 0;
+ }
+
+ return 1;
+
+fatal_error:
+ /* Something went horribly wrong.
+ * Reset hardware to try to recover rather than wedging.
+ */
+ if (netif_msg_drv(priv)) {
+ netdev_err(priv->dev, "cpmac_poll is confused. "
+ "Resetting hardware\n");
+ cpmac_dump_all_desc(priv->dev);
+ netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+ cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+ cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+ }
+
+ spin_unlock(&priv->rx_lock);
+ napi_complete(napi);
+ netif_tx_stop_all_queues(priv->dev);
+ napi_disable(&priv->napi);
+
+ atomic_inc(&priv->reset_pending);
+ cpmac_hw_stop(priv->dev);
+ if (!schedule_work(&priv->reset_work))
+ atomic_dec(&priv->reset_pending);
+
+ return 0;
+
+}
+
+static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ int queue;
+ unsigned int len;
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (unlikely(atomic_read(&priv->reset_pending)))
+ return NETDEV_TX_BUSY;
+
+ if (unlikely(skb_padto(skb, ETH_ZLEN)))
+ return NETDEV_TX_OK;
+
+ len = max_t(unsigned int, skb->len, ETH_ZLEN);
+ queue = skb_get_queue_mapping(skb);
+ netif_stop_subqueue(dev, queue);
+
+ desc = &priv->desc_ring[queue];
+ if (unlikely(desc->dataflags & CPMAC_OWN)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_warn(dev, "tx dma ring full\n");
+
+ return NETDEV_TX_BUSY;
+ }
+
+ spin_lock(&priv->lock);
+ spin_unlock(&priv->lock);
+ desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
+ DMA_TO_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->datalen = len;
+ desc->buflen = len;
+ if (unlikely(netif_msg_tx_queued(priv)))
+ netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ if (unlikely(netif_msg_pktdata(priv)))
+ cpmac_dump_skb(dev, skb);
+ cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);
+
+ return NETDEV_TX_OK;
+}
+
+static void cpmac_end_xmit(struct net_device *dev, int queue)
+{
+ struct cpmac_desc *desc;
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ desc = &priv->desc_ring[queue];
+ cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
+ if (likely(desc->skb)) {
+ spin_lock(&priv->lock);
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += desc->skb->len;
+ spin_unlock(&priv->lock);
+ dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
+ DMA_TO_DEVICE);
+
+ if (unlikely(netif_msg_tx_done(priv)))
+ netdev_dbg(dev, "sent 0x%p, len=%d\n",
+ desc->skb, desc->skb->len);
+
+ dev_consume_skb_irq(desc->skb);
+ desc->skb = NULL;
+ if (__netif_subqueue_stopped(dev, queue))
+ netif_wake_subqueue(dev, queue);
+ } else {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_warn(dev, "end_xmit: spurious interrupt\n");
+ if (__netif_subqueue_stopped(dev, queue))
+ netif_wake_subqueue(dev, queue);
+ }
+}
+
+static void cpmac_hw_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
+
+ ar7_device_reset(pdata->reset_bit);
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
+}
+
+static void cpmac_hw_start(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);
+
+ ar7_device_reset(pdata->reset_bit);
+ for (i = 0; i < 8; i++) {
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
+ }
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
+
+ cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
+ MBP_RXMCAST);
+ cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
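+	/* program the station MAC address; ADDR_LO holds the last octet per channel */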
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
+ cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
+ (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
+ (dev->dev_addr[3] << 24));
+ cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
+ cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+ cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
+ cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+
+ cpmac_write(priv->regs, CPMAC_RX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_TX_CONTROL,
+ cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
+ cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
+ cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
+ MAC_FDX);
+}
+
+static void cpmac_clear_rx(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct cpmac_desc *desc;
+ int i;
+
+ if (unlikely(!priv->rx_head))
+ return;
+ desc = priv->rx_head;
+ for (i = 0; i < priv->ring_size; i++) {
+ if ((desc->dataflags & CPMAC_OWN) == 0) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ netdev_warn(dev, "packet dropped\n");
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_desc(dev, desc);
+ desc->dataflags = CPMAC_OWN;
+ dev->stats.rx_dropped++;
+ }
+ desc->hw_next = desc->next->mapping;
+ desc = desc->next;
+ }
+ priv->rx_head->prev->hw_next = 0;
+}
+
+static void cpmac_clear_tx(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (unlikely(!priv->desc_ring))
+ return;
+ for (i = 0; i < CPMAC_QUEUES; i++) {
+ priv->desc_ring[i].dataflags = 0;
+ if (priv->desc_ring[i].skb) {
+ dev_kfree_skb_any(priv->desc_ring[i].skb);
+ priv->desc_ring[i].skb = NULL;
+ }
+ }
+}
+
+static void cpmac_hw_error(struct work_struct *work)
+{
+ struct cpmac_priv *priv =
+ container_of(work, struct cpmac_priv, reset_work);
+
+ spin_lock(&priv->rx_lock);
+ cpmac_clear_rx(priv->dev);
+ spin_unlock(&priv->rx_lock);
+ cpmac_clear_tx(priv->dev);
+ cpmac_hw_start(priv->dev);
+ barrier();
+ atomic_dec(&priv->reset_pending);
+
+ netif_tx_wake_all_queues(priv->dev);
+ cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
+}
+
+static void cpmac_check_status(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
+ int rx_channel = (macstatus >> 8) & 7;
+ int rx_code = (macstatus >> 12) & 15;
+ int tx_channel = (macstatus >> 16) & 7;
+ int tx_code = (macstatus >> 20) & 15;
+
+ if (rx_code || tx_code) {
+ if (netif_msg_drv(priv) && net_ratelimit()) {
+ /* Can't find any documentation on what these
+ * error codes actually are. So just log them and hope..
+ */
+ if (rx_code)
+ netdev_warn(dev, "host error %d on rx "
+ "channel %d (macstatus %08x), resetting\n",
+ rx_code, rx_channel, macstatus);
+ if (tx_code)
+ netdev_warn(dev, "host error %d on tx "
+ "channel %d (macstatus %08x), resetting\n",
+ tx_code, tx_channel, macstatus);
+ }
+
+ netif_tx_stop_all_queues(dev);
+ cpmac_hw_stop(dev);
+ if (schedule_work(&priv->reset_work))
+ atomic_inc(&priv->reset_pending);
+ if (unlikely(netif_msg_hw(priv)))
+ cpmac_dump_regs(dev);
+ }
+ cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
+}
+
+static irqreturn_t cpmac_irq(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct cpmac_priv *priv;
+ int queue;
+ u32 status;
+
+ priv = netdev_priv(dev);
+
+ status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
+
+ if (unlikely(netif_msg_intr(priv)))
+ netdev_dbg(dev, "interrupt status: 0x%08x\n", status);
+
+ if (status & MAC_INT_TX)
+ cpmac_end_xmit(dev, (status & 7));
+
+ if (status & MAC_INT_RX) {
+ queue = (status >> 8) & 7;
+ if (napi_schedule_prep(&priv->napi)) {
+ cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);
+
+ if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
+ cpmac_check_status(dev);
+
+ return IRQ_HANDLED;
+}
+
+static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ spin_lock(&priv->lock);
+ dev->stats.tx_errors++;
+ spin_unlock(&priv->lock);
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ netdev_warn(dev, "transmit timeout\n");
+
+ atomic_inc(&priv->reset_pending);
+ barrier();
+ cpmac_clear_tx(dev);
+ barrier();
+ atomic_dec(&priv->reset_pending);
+
+ netif_tx_wake_all_queues(priv->dev);
+}
+
+static void cpmac_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = 1024;
+ ring->rx_mini_max_pending = 1;
+ ring->rx_jumbo_max_pending = 1;
+ ring->tx_max_pending = 1;
+
+ ring->rx_pending = priv->ring_size;
+ ring->rx_mini_pending = 1;
+ ring->rx_jumbo_pending = 1;
+ ring->tx_pending = 1;
+}
+
+static int cpmac_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring,
+ struct kernel_ethtool_ringparam *kernel_ring,
+ struct netlink_ext_ack *extack)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+
+ if (netif_running(dev))
+ return -EBUSY;
+ priv->ring_size = ring->rx_pending;
+
+ return 0;
+}
+
+static void cpmac_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, "cpmac", sizeof(info->driver));
+ strscpy(info->version, CPMAC_VERSION, sizeof(info->version));
+ snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
+}
+
+static const struct ethtool_ops cpmac_ethtool_ops = {
+ .get_drvinfo = cpmac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = cpmac_get_ringparam,
+ .set_ringparam = cpmac_set_ringparam,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+static void cpmac_adjust_link(struct net_device *dev)
+{
+ struct cpmac_priv *priv = netdev_priv(dev);
+ int new_state = 0;
+
+ spin_lock(&priv->lock);
+ if (dev->phydev->link) {
+ netif_tx_start_all_queues(dev);
+ if (dev->phydev->duplex != priv->oldduplex) {
+ new_state = 1;
+ priv->oldduplex = dev->phydev->duplex;
+ }
+
+ if (dev->phydev->speed != priv->oldspeed) {
+ new_state = 1;
+ priv->oldspeed = dev->phydev->speed;
+ }
+
+ if (!priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 1;
+ }
+ } else if (priv->oldlink) {
+ new_state = 1;
+ priv->oldlink = 0;
+ priv->oldspeed = 0;
+ priv->oldduplex = -1;
+ }
+
+ if (new_state && netif_msg_link(priv) && net_ratelimit())
+ phy_print_status(dev->phydev);
+
+ spin_unlock(&priv->lock);
+}
+
+static int cpmac_open(struct net_device *dev)
+{
+ int i, size, res;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+ struct cpmac_desc *desc;
+ struct sk_buff *skb;
+
+ mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+ if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
+ if (netif_msg_drv(priv))
+ netdev_err(dev, "failed to request registers\n");
+
+ res = -ENXIO;
+ goto fail_reserve;
+ }
+
+ priv->regs = ioremap(mem->start, resource_size(mem));
+ if (!priv->regs) {
+ if (netif_msg_drv(priv))
+ netdev_err(dev, "failed to remap registers\n");
+
+ res = -ENXIO;
+ goto fail_remap;
+ }
+
+ size = priv->ring_size + CPMAC_QUEUES;
+ priv->desc_ring = dma_alloc_coherent(&dev->dev,
+ sizeof(struct cpmac_desc) * size,
+ &priv->dma_ring,
+ GFP_KERNEL);
+ if (!priv->desc_ring) {
+ res = -ENOMEM;
+ goto fail_alloc;
+ }
+
+ for (i = 0; i < size; i++)
+ priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;
+
+ priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+ for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
+ skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
+ if (unlikely(!skb)) {
+ res = -ENOMEM;
+ goto fail_desc;
+ }
+ desc->skb = skb;
+ desc->data_mapping = dma_map_single(&dev->dev, skb->data,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ desc->hw_data = (u32)desc->data_mapping;
+ desc->buflen = CPMAC_SKB_SIZE;
+ desc->dataflags = CPMAC_OWN;
+ desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
+ desc->next->prev = desc;
+ desc->hw_next = (u32)desc->next->mapping;
+ }
+
+ priv->rx_head->prev->hw_next = (u32)0;
+
+ res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
+ if (res) {
+ if (netif_msg_drv(priv))
+ netdev_err(dev, "failed to obtain irq\n");
+
+ goto fail_irq;
+ }
+
+ atomic_set(&priv->reset_pending, 0);
+ INIT_WORK(&priv->reset_work, cpmac_hw_error);
+ cpmac_hw_start(dev);
+
+ napi_enable(&priv->napi);
+ phy_start(dev->phydev);
+
+ return 0;
+
+fail_irq:
+fail_desc:
+ for (i = 0; i < priv->ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
+ priv->desc_ring, priv->dma_ring);
+
+fail_alloc:
+ iounmap(priv->regs);
+
+fail_remap:
+ release_mem_region(mem->start, resource_size(mem));
+
+fail_reserve:
+ return res;
+}
+
+static int cpmac_stop(struct net_device *dev)
+{
+ int i;
+ struct cpmac_priv *priv = netdev_priv(dev);
+ struct resource *mem;
+
+ netif_tx_stop_all_queues(dev);
+
+ cancel_work_sync(&priv->reset_work);
+ napi_disable(&priv->napi);
+ phy_stop(dev->phydev);
+
+ cpmac_hw_stop(dev);
+
+ for (i = 0; i < 8; i++)
+ cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
+ cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
+ cpmac_write(priv->regs, CPMAC_MBP, 0);
+
+ free_irq(dev->irq, dev);
+ iounmap(priv->regs);
+ mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
+ release_mem_region(mem->start, resource_size(mem));
+ priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
+ for (i = 0; i < priv->ring_size; i++) {
+ if (priv->rx_head[i].skb) {
+ dma_unmap_single(&dev->dev,
+ priv->rx_head[i].data_mapping,
+ CPMAC_SKB_SIZE,
+ DMA_FROM_DEVICE);
+ kfree_skb(priv->rx_head[i].skb);
+ }
+ }
+
+ dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
+ (CPMAC_QUEUES + priv->ring_size),
+ priv->desc_ring, priv->dma_ring);
+
+ return 0;
+}
+
+static const struct net_device_ops cpmac_netdev_ops = {
+ .ndo_open = cpmac_open,
+ .ndo_stop = cpmac_stop,
+ .ndo_start_xmit = cpmac_start_xmit,
+ .ndo_tx_timeout = cpmac_tx_timeout,
+ .ndo_set_rx_mode = cpmac_set_multicast_list,
+ .ndo_eth_ioctl = phy_do_ioctl_running,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = eth_mac_addr,
+};
+
+static int external_switch;
+
+static int cpmac_probe(struct platform_device *pdev)
+{
+ int rc, phy_id;
+ char mdio_bus_id[MII_BUS_ID_SIZE];
+ struct resource *mem;
+ struct cpmac_priv *priv;
+ struct net_device *dev;
+ struct plat_cpmac_data *pdata;
+ struct phy_device *phydev = NULL;
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ if (external_switch || dumb_switch) {
+ strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
+ } else {
+ for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
+ if (!(pdata->phy_mask & (1 << phy_id)))
+ continue;
+ if (!mdiobus_get_phy(cpmac_mii, phy_id))
+ continue;
+ strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
+ break;
+ }
+ }
+
+ if (phy_id == PHY_MAX_ADDR) {
+ dev_err(&pdev->dev, "no PHY present, falling back "
+ "to switch on MDIO bus 0\n");
+ strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
+ phy_id = pdev->id;
+ }
+ mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';
+
+ dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
+ if (!dev)
+ return -ENOMEM;
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ platform_set_drvdata(pdev, dev);
+ priv = netdev_priv(dev);
+
+ priv->pdev = pdev;
+ mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
+ if (!mem) {
+ rc = -ENODEV;
+ goto fail;
+ }
+
+ dev->irq = platform_get_irq_byname(pdev, "irq");
+
+ dev->netdev_ops = &cpmac_netdev_ops;
+ dev->ethtool_ops = &cpmac_ethtool_ops;
+
+ netif_napi_add(dev, &priv->napi, cpmac_poll);
+
+ spin_lock_init(&priv->lock);
+ spin_lock_init(&priv->rx_lock);
+ priv->dev = dev;
+ priv->ring_size = 64;
+ priv->msg_enable = netif_msg_init(debug_level, 0xff);
+ eth_hw_addr_set(dev, pdata->dev_addr);
+
+ snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
+ mdio_bus_id, phy_id);
+
+ phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+
+ if (IS_ERR(phydev)) {
+ if (netif_msg_drv(priv))
+ dev_err(&pdev->dev, "Could not attach to PHY\n");
+
+ rc = PTR_ERR(phydev);
+ goto fail;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ dev_err(&pdev->dev, "Could not register net device\n");
+ goto fail;
+ }
+
+ if (netif_msg_probe(priv)) {
+ dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
+ "mac: %pM\n", (void *)mem->start, dev->irq,
+ priv->phy_name, dev->dev_addr);
+ }
+
+ return 0;
+
+fail:
+ free_netdev(dev);
+ return rc;
+}
+
+static int cpmac_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+
+ unregister_netdev(dev);
+ free_netdev(dev);
+
+ return 0;
+}
+
+static struct platform_driver cpmac_driver = {
+ .driver = {
+ .name = "cpmac",
+ },
+ .probe = cpmac_probe,
+ .remove = cpmac_remove,
+};
+
+int __init cpmac_init(void)
+{
+ u32 mask;
+ int i, res;
+
+ cpmac_mii = mdiobus_alloc();
+ if (cpmac_mii == NULL)
+ return -ENOMEM;
+
+ cpmac_mii->name = "cpmac-mii";
+ cpmac_mii->read = cpmac_mdio_read;
+ cpmac_mii->write = cpmac_mdio_write;
+ cpmac_mii->reset = cpmac_mdio_reset;
+
+ cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
+
+ if (!cpmac_mii->priv) {
+ pr_err("Can't ioremap mdio registers\n");
+ res = -ENXIO;
+ goto fail_alloc;
+ }
+
+ /* FIXME: unhardcode gpio&reset bits */
+ ar7_gpio_disable(26);
+ ar7_gpio_disable(27);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
+ ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
+ ar7_device_reset(AR7_RESET_BIT_EPHY);
+
+ cpmac_mii->reset(cpmac_mii);
+
+ for (i = 0; i < 300; i++) {
+ mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
+ if (mask)
+ break;
+ else
+ msleep(10);
+ }
+
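+	/* more than one responding PHY means the MAC is wired to an external switch */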
+ mask &= 0x7fffffff;
+ if (mask & (mask - 1)) {
+ external_switch = 1;
+ mask = 0;
+ }
+
+ cpmac_mii->phy_mask = ~(mask | 0x80000000);
+ snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");
+
+ res = mdiobus_register(cpmac_mii);
+ if (res)
+ goto fail_mii;
+
+ res = platform_driver_register(&cpmac_driver);
+ if (res)
+ goto fail_cpmac;
+
+ return 0;
+
+fail_cpmac:
+ mdiobus_unregister(cpmac_mii);
+
+fail_mii:
+ iounmap(cpmac_mii->priv);
+
+fail_alloc:
+ mdiobus_free(cpmac_mii);
+
+ return res;
+}
+
+void __exit cpmac_exit(void)
+{
+ platform_driver_unregister(&cpmac_driver);
+ mdiobus_unregister(cpmac_mii);
+ iounmap(cpmac_mii->priv);
+ mdiobus_free(cpmac_mii);
+}
+
+module_init(cpmac_init);
+module_exit(cpmac_exit);
diff --git a/drivers/net/ethernet/ti/cpsw-common.c b/drivers/net/ethernet/ti/cpsw-common.c
new file mode 100644
index 000000000..bfa81bbfc
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-common.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+
+#include "cpsw.h"
+
+#define CTRL_MAC_LO_REG(offset, id) ((offset) + 0x8 * (id))
+#define CTRL_MAC_HI_REG(offset, id) ((offset) + 0x8 * (id) + 0x4)
+
+static int davinci_emac_3517_get_macid(struct device *dev, u16 offset,
+ int slave, u8 *mac_addr)
+{
+ u32 macid_lsb;
+ u32 macid_msb;
+ struct regmap *syscon;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lsb);
+ regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_msb);
+
+ mac_addr[0] = (macid_msb >> 16) & 0xff;
+ mac_addr[1] = (macid_msb >> 8) & 0xff;
+ mac_addr[2] = macid_msb & 0xff;
+ mac_addr[3] = (macid_lsb >> 16) & 0xff;
+ mac_addr[4] = (macid_lsb >> 8) & 0xff;
+ mac_addr[5] = macid_lsb & 0xff;
+
+ return 0;
+}
+
+static int cpsw_am33xx_cm_get_macid(struct device *dev, u16 offset, int slave,
+ u8 *mac_addr)
+{
+ u32 macid_lo;
+ u32 macid_hi;
+ struct regmap *syscon;
+
+ syscon = syscon_regmap_lookup_by_phandle(dev->of_node, "syscon");
+ if (IS_ERR(syscon)) {
+ if (PTR_ERR(syscon) == -ENODEV)
+ return 0;
+ return PTR_ERR(syscon);
+ }
+
+ regmap_read(syscon, CTRL_MAC_LO_REG(offset, slave), &macid_lo);
+ regmap_read(syscon, CTRL_MAC_HI_REG(offset, slave), &macid_hi);
+
+ mac_addr[5] = (macid_lo >> 8) & 0xff;
+ mac_addr[4] = macid_lo & 0xff;
+ mac_addr[3] = (macid_hi >> 24) & 0xff;
+ mac_addr[2] = (macid_hi >> 16) & 0xff;
+ mac_addr[1] = (macid_hi >> 8) & 0xff;
+ mac_addr[0] = macid_hi & 0xff;
+
+ return 0;
+}
+
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr)
+{
+ if (of_machine_is_compatible("ti,dm8148"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,am33xx"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_device_is_compatible(dev->of_node, "ti,am3517-emac"))
+ return davinci_emac_3517_get_macid(dev, 0x110, slave, mac_addr);
+
+ if (of_device_is_compatible(dev->of_node, "ti,dm816-emac"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x30, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,am43"))
+ return cpsw_am33xx_cm_get_macid(dev, 0x630, slave, mac_addr);
+
+ if (of_machine_is_compatible("ti,dra7"))
+ return davinci_emac_3517_get_macid(dev, 0x514, slave, mac_addr);
+
+ dev_info(dev, "incompatible machine/device type for reading mac address\n");
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(ti_cm_get_macid);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
new file mode 100644
index 000000000..e8f38e3f7
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ * Module Author: Mugunthan V N <mugunthanvnm@ti.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "cpsw.h"
+
+/* AM33xx SoC specific definitions for the CONTROL port */
+#define AM33XX_GMII_SEL_MODE_MII 0
+#define AM33XX_GMII_SEL_MODE_RMII 1
+#define AM33XX_GMII_SEL_MODE_RGMII 2
+
+#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
+#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define AM33XX_GMII_SEL_RGMII2_IDMODE BIT(5)
+#define AM33XX_GMII_SEL_RGMII1_IDMODE BIT(4)
+
+#define GMII_SEL_MODE_MASK 0x3
+
+struct cpsw_phy_sel_priv {
+ struct device *dev;
+ u32 __iomem *gmii_sel;
+ bool rmii_clock_external;
+ void (*cpsw_phy_sel)(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave);
+};
+
+static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+ bool rgmii_id = false;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ rgmii_id = true;
+ break;
+
+ default:
+ dev_warn(priv->dev,
+ "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
+ phy_modes(phy_mode));
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
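+	/* Clear this slave's 2-bit mode field along with its RMII IO clock
+	 * enable bit (bit 6/7) and RGMII internal delay bit (bit 4/5).
+	 */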
+ mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
+ mask |= BIT(slave + 4);
+ mode <<= slave * 2;
+
+ if (priv->rmii_clock_external) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RMII1_IO_CLK_EN;
+ else
+ mode |= AM33XX_GMII_SEL_RMII2_IO_CLK_EN;
+ }
+
+ if (rgmii_id) {
+ if (slave == 0)
+ mode |= AM33XX_GMII_SEL_RGMII1_IDMODE;
+ else
+ mode |= AM33XX_GMII_SEL_RGMII2_IDMODE;
+ }
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ default:
+ dev_warn(priv->dev,
+ "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
+ phy_modes(phy_mode));
+ fallthrough;
+ case PHY_INTERFACE_MODE_MII:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ }
+
+ switch (slave) {
+ case 0:
+ mask = GMII_SEL_MODE_MASK;
+ break;
+ case 1:
+ mask = GMII_SEL_MODE_MASK << 4;
+ mode <<= 4;
+ break;
+ default:
+ dev_err(priv->dev, "invalid slave number...\n");
+ return;
+ }
+
+ if (priv->rmii_clock_external)
+ dev_err(priv->dev, "RMII External clock is not supported\n");
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
+static struct platform_driver cpsw_phy_sel_driver;
+static int match(struct device *dev, const void *data)
+{
+ const struct device_node *node = (const struct device_node *)data;
+ return dev->of_node == node &&
+ dev->driver == &cpsw_phy_sel_driver.driver;
+}
+
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{
+ struct device_node *node;
+ struct cpsw_phy_sel_priv *priv;
+
+ node = of_parse_phandle(dev->of_node, "cpsw-phy-sel", 0);
+ if (!node) {
+ node = of_get_child_by_name(dev->of_node, "cpsw-phy-sel");
+ if (!node) {
+ dev_err(dev, "Phy mode driver DT not found\n");
+ return;
+ }
+ }
+
+ dev = bus_find_device(&platform_bus_type, NULL, node, match);
+ if (!dev) {
+ dev_err(dev, "unable to find platform device for %pOF\n", node);
+ goto out;
+ }
+
+ priv = dev_get_drvdata(dev);
+
+ priv->cpsw_phy_sel(priv, phy_mode, slave);
+
+ put_device(dev);
+out:
+ of_node_put(node);
+}
+EXPORT_SYMBOL_GPL(cpsw_phy_sel);
+
+static const struct of_device_id cpsw_phy_sel_id_table[] = {
+ {
+ .compatible = "ti,am3352-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
+ {
+ .compatible = "ti,dra7xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_dra7xx,
+ },
+ {
+ .compatible = "ti,am43xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
+ {}
+};
+
+static int cpsw_phy_sel_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ struct cpsw_phy_sel_priv *priv;
+
+ of_id = of_match_node(cpsw_phy_sel_id_table, pdev->dev.of_node);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv) {
+ dev_err(&pdev->dev, "unable to alloc memory for cpsw phy sel\n");
+ return -ENOMEM;
+ }
+
+ priv->dev = &pdev->dev;
+ priv->cpsw_phy_sel = of_id->data;
+
+ priv->gmii_sel = devm_platform_ioremap_resource_byname(pdev, "gmii-sel");
+ if (IS_ERR(priv->gmii_sel))
+ return PTR_ERR(priv->gmii_sel);
+
+ if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL))
+ priv->rmii_clock_external = true;
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver cpsw_phy_sel_driver = {
+ .probe = cpsw_phy_sel_probe,
+ .driver = {
+ .name = "cpsw-phy-sel",
+ .of_match_table = cpsw_phy_sel_id_table,
+ },
+};
+builtin_platform_driver(cpsw_phy_sel_driver);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
new file mode 100644
index 000000000..13c9c2d6b
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -0,0 +1,1802 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
+#include <net/page_pool.h>
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+
+#include <linux/pinctrl/consumer.h>
+#include <net/pkt_cls.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "cpts.h"
+#include "davinci_cpdma.h"
+
+#include <net/pkt_sched.h>
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
+
+static int ale_ageout = 10;
+module_param(ale_ageout, int, 0);
+MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
+
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+module_param(rx_packet_max, int, 0);
+MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
+
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+module_param(descs_pool_size, int, 0444);
+MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");
+
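+/* Invoke @func on the single slave owned by this netdev in dual EMAC mode,
+ * or on every slave otherwise.
+ */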
+#define for_each_slave(priv, func, arg...) \
+ do { \
+ struct cpsw_slave *slave; \
+ struct cpsw_common *cpsw = (priv)->cpsw; \
+ int n; \
+ if (cpsw->data.dual_emac) \
+ (func)((cpsw)->slaves + priv->emac_port, ##arg);\
+ else \
+ for (n = cpsw->data.slaves, \
+ slave = cpsw->slaves; \
+ n; n--) \
+ (func)(slave++, ##arg); \
+ } while (0)
+
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ return cpsw->data.dual_emac ? priv->emac_port : cpsw->data.active_slave;
+}
+
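+/* Switch port 0 is the host port; slave N maps to switch port N + 1 */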
+static int cpsw_get_slave_port(u32 slave_num)
+{
+ return slave_num + 1;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid);
+
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_ale *ale = cpsw->ale;
+ int i;
+
+ if (cpsw->data.dual_emac) {
+ bool flag = false;
+
+		/* Enabling promiscuous mode on one interface also affects
+		 * the other interface, as both share the same hardware
+		 * resource.
+		 */
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
+ flag = true;
+
+ if (!enable && flag) {
+ enable = true;
+ dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);
+
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable Bypass */
+ cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ } else {
+ if (enable) {
+ unsigned long timeout = jiffies + HZ;
+
+			/* Disable learning on all ports (host is port 0 and slaves are ports 1 and up) */
+ for (i = 0; i <= cpsw->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 1);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 1);
+ }
+
+ /* Clear All Untouched entries */
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+ do {
+ cpu_relax();
+ if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+ break;
+ } while (time_after(timeout, jiffies));
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);
+
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(&ndev->dev, "promiscuity enabled\n");
+ } else {
+ /* Don't Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+
+			/* Enable learning on all ports (host is port 0 and slaves are ports 1 and up) */
+ for (i = 0; i <= cpsw->data.slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, 0);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, 0);
+ }
+ dev_dbg(&ndev->dev, "promiscuity disabled\n");
+ }
+ }
+}
+
+/**
+ * cpsw_set_mc - add a multicast address to the ALE table or remove it
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret;
+
+ if (vid < 0) {
+ if (cpsw->data.dual_emac)
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
+ else
+ vid = 0;
+ }
+
+ mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_port = -1;
+
+ if (cpsw->data.dual_emac)
+ slave_port = priv->emac_port + 1;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ cpsw_set_promiscious(ndev, true);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, slave_port);
+ return;
+ } else {
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
+ }
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, slave_port);
+
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
+}
+
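+/* Total page space needed for one RX buffer: headroom + payload +
+ * skb_shared_info, rounded up so build_skb() can be used on the page.
+ */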
+static unsigned int cpsw_rxbuf_total_len(unsigned int len)
+{
+ len += CPSW_HEADROOM_NA;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
+
+static void cpsw_rx_handler(void *token, int len, int status)
+{
+ struct page *new_page, *page = token;
+ void *pa = page_address(page);
+ struct cpsw_meta_xdp *xmeta = pa + CPSW_XMETA_OFFSET;
+ struct cpsw_common *cpsw = ndev_to_cpsw(xmeta->ndev);
+ int pkt_size = cpsw->rx_packet_max;
+ int ret = 0, port, ch = xmeta->ch;
+ int headroom = CPSW_HEADROOM_NA;
+ struct net_device *ndev = xmeta->ndev;
+ struct cpsw_priv *priv;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ dma_addr_t dma;
+
+ if (cpsw->data.dual_emac && status >= 0) {
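+		/* the CPDMA RX status reports the ingress port 1-based;
+		 * convert it to a slave index to pick the right netdev
+		 */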
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port)
+ ndev = cpsw->slaves[--port].ndev;
+ }
+
+ priv = netdev_priv(ndev);
+ pool = cpsw->page_pool[ch];
+ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->data.dual_emac && cpsw->usage_count &&
+ (status >= 0)) {
+			/* The packet was received for an interface that is
+			 * already down while the other interface is still up
+			 * and running. Requeue the page back to cpdma instead
+			 * of freeing it, which would shrink the number of RX
+			 * descriptors available to the DMA engine.
+			 */
+ new_page = page;
+ goto requeue;
+ }
+
+ /* the interface is going down, pages are purged */
+ page_pool_recycle_direct(pool, page);
+ return;
+ }
+
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ if (priv->xdp_prog) {
+ int size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+ if (status & CPDMA_RX_VLAN_ENCAP) {
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ }
+
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
+
+ port = priv->emac_port + cpsw->data.dual_emac;
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, port, &len);
+ if (ret != CPSW_XDP_PASS)
+ goto requeue;
+
+ headroom = xdp.data - xdp.data_hard_start;
+
+ /* XDP prog can modify vlan tag, so can't use encap header */
+ status &= ~CPDMA_RX_VLAN_ENCAP;
+ }
+
+ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
+ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+ skb->dev = ndev;
+ if (status & CPDMA_RX_VLAN_ENCAP)
+ cpsw_rx_vlan_encap(skb);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* mark skb for recycling */
+ skb_mark_for_recycle(skb);
+ netif_receive_skb(skb);
+
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+requeue:
+ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = ndev;
+ xmeta->ch = ch;
+
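+	/* packet data starts after the reserved headroom within the page */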
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
+ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
+ pkt_size, 0);
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
+ page_pool_recycle_direct(pool, new_page);
+ }
+}
+
+static void _cpsw_adjust_link(struct cpsw_slave *slave,
+ struct cpsw_priv *priv, bool *link)
+{
+ struct phy_device *phy = slave->phy;
+ u32 mac_control = 0;
+ u32 slave_port;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (!phy)
+ return;
+
+ slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+		/* set speed_in input in case RMII mode is used at 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
+
+ if (priv->rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (priv->tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ *link = true;
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+ } else {
+ mac_control = 0;
+ /* disable forwarding */
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
+ }
+
+ if (mac_control != slave->mac_control)
+ phy_print_status(phy);
+
+ slave->mac_control = mac_control;
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ bool link = false;
+
+ for_each_slave(priv, _cpsw_adjust_link, priv, &link);
+
+ if (link) {
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+
+ netif_carrier_on(ndev);
+ if (netif_running(ndev))
+ netif_tx_wake_all_queues(ndev);
+ } else {
+ netif_carrier_off(ndev);
+ netif_tx_stop_all_queues(ndev);
+ }
+}
+
+static inline void cpsw_add_dual_emac_def_ale_entries(
+ struct cpsw_priv *priv, struct cpsw_slave *slave,
+ u32 slave_port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
+ else
+ slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+}
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ u32 slave_port;
+ struct phy_device *phy;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
+
+ /* setup priority mapping */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+ break;
+ }
+
+ /* setup max packet size, and mac address */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ if (cpsw->data.dual_emac)
+ cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
+ else
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
+
+ if (slave->data->phy_node) {
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (!phy) {
+ dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
+ slave->data->phy_node,
+ slave->slave_num);
+ return;
+ }
+ } else {
+ phy = phy_connect(priv->ndev, slave->data->phy_id,
+ &cpsw_adjust_link, slave->data->phy_if);
+ if (IS_ERR(phy)) {
+ dev_err(priv->dev,
+ "phy \"%s\" not found on slave %d, err %ld\n",
+ slave->data->phy_id, slave->slave_num,
+ PTR_ERR(phy));
+ return;
+ }
+ }
+
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ if (!IS_ERR(slave->data->ifphy))
+ phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
+ slave->data->phy_if);
+ else
+ cpsw_phy_sel(cpsw->dev, slave->phy->interface,
+ slave->slave_num);
+}
+
+static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ const int vlan = cpsw->data.default_vlan;
+ u32 reg;
+ int i;
+ int unreg_mcast_mask;
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ for (i = 0; i < cpsw->data.slaves; i++)
+ slave_write(cpsw->slaves + i, vlan, reg);
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ unreg_mcast_mask);
+}
+
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+ u32 fifo_mode;
+ u32 control_reg;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
+
+	/* switch to vlan aware mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&cpsw->regs->control);
+ control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
+ writel(control_reg, &cpsw->regs->control);
+ fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
+ CPSW_FIFO_NORMAL_MODE;
+ writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
+
+ /* setup host port priority mapping */
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ if (!cpsw->data.dual_emac) {
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+ 0, 0);
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
+ }
+}
+
+static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
+{
+ u32 slave_port;
+
+ slave_port = cpsw_get_slave_port(slave->slave_num);
+
+ if (!slave->phy)
+ return;
+ phy_stop(slave->phy);
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+ cpsw_ale_control_set(cpsw->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
+}
+
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
+ /* restore MQPRIO offload */
+ for_each_slave(priv, cpsw_mqprio_resume, priv);
+
+ /* restore CBS offload */
+ for_each_slave(priv, cpsw_cbs_resume, priv);
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+ u32 reg;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ netif_carrier_off(ndev);
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err_cleanup;
+ }
+
+ reg = cpsw->version;
+
+ dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
+ CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
+ CPSW_RTL_VERSION(reg));
+
+ /* Initialize host and slave ports */
+ if (!cpsw->usage_count)
+ cpsw_init_host_port(priv);
+ for_each_slave(priv, cpsw_slave_open, priv);
+
+ /* Add default VLAN */
+ if (!cpsw->data.dual_emac)
+ cpsw_add_default_vlan(priv);
+ else
+ cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
+ ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
+
+ /* initialize shared resources for every ndev */
+ if (!cpsw->usage_count) {
+ /* disable priority elevation */
+ writel_relaxed(0, &cpsw->regs->ptype);
+
+		/* enable statistics collection on all ports */
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
+
+ /* Enable internal fifo flow control */
+ writel(0x7, &cpsw->regs->flow_control);
+
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
+
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+
+		/* create rxqs for both interfaces in dual mac mode as they
+		 * share the same pool and must be destroyed together once
+		 * there are no more users.
+		 */
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
+ }
+
+ cpsw_restore(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (cpsw->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
+ cpsw_set_coalesce(ndev, &coal, NULL, NULL);
+ }
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ cpsw->usage_count++;
+
+ return 0;
+
+err_cleanup:
+ if (!cpsw->usage_count) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+ pm_runtime_put_sync(cpsw->dev);
+ netif_carrier_off(priv->ndev);
+ return ret;
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ cpsw_info(priv, ifdown, "shutting down cpsw device\n");
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
+ netif_tx_stop_all_queues(priv->ndev);
+ netif_carrier_off(priv->ndev);
+
+ if (cpsw->usage_count <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+ for_each_slave(priv, cpsw_slave_stop, cpsw);
+
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+
+ cpsw->usage_count--;
+ pm_runtime_put_sync(cpsw->dev);
+ return 0;
+}
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
+
+ if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) {
+ cpsw_err(priv, tx_err, "packet pad failed\n");
+ ndev->stats.tx_dropped++;
+ return NET_XMIT_DROP;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port + cpsw->data.dual_emac);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
+	/* If there are no more free tx descriptors, tell the kernel
+	 * to stop sending us tx frames.
+	 */
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+ }
+
+ return NETDEV_TX_OK;
+fail:
+ ndev->stats.tx_dropped++;
+ netif_tx_stop_queue(txq);
+
+ /* Barrier, so that stop_queue visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+}
+
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int flags = 0;
+ u16 vid = 0;
+ int ret;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ if (cpsw->data.dual_emac) {
+ vid = cpsw->slaves[priv->emac_port].port_vlan;
+ flags = ALE_VLAN;
+ }
+
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+ flags, vid);
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
+ flags, vid);
+
+ memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
+ eth_hw_addr_set(ndev, priv->mac_addr);
+ for_each_slave(priv, cpsw_set_slave_mac, priv);
+
+ pm_runtime_put(cpsw->dev);
+
+ return 0;
+}
+
+static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ int ret;
+ int unreg_mcast_mask = 0;
+ int mcast_mask;
+ u32 port_mask;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (cpsw->data.dual_emac) {
+ port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+
+ mcast_mask = ALE_PORT_HOST;
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = mcast_mask;
+ } else {
+ port_mask = ALE_ALL_PORTS;
+ mcast_mask = port_mask;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ }
+
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
+ unreg_mcast_mask);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ mcast_mask, ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ if (cpsw->data.dual_emac) {
+		/* In dual EMAC mode, the reserved VLAN ids must not be used
+		 * to create VLAN interfaces, as this would break the dual
+		 * EMAC port separation
+		 */
+ int i;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ }
+
+ dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ if (cpsw->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (vid == cpsw->slaves[i].port_vlan)
+ goto err;
+ }
+ }
+
+ dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ ret |= cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct xdp_frame *xdpf;
+ int i, nxmit = 0, port;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
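+		/* stop at the first frame shorter than the minimum packet size */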
+ if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+ break;
+
+ port = priv->emac_port + cpsw->data.dual_emac;
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
+ break;
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
+static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+ .ndo_start_xmit = cpsw_ndo_start_xmit,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
+ .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
+ .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
+ .ndo_bpf = cpsw_ndo_bpf,
+ .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+};
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev = to_platform_device(cpsw->dev);
+
+ strscpy(info->driver, "cpsw", sizeof(info->driver));
+ strscpy(info->version, "1.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ bool link;
+
+ priv->rx_pause = pause->rx_pause ? true : false;
+ priv->tx_pause = pause->tx_pause ? true : false;
+
+ for_each_slave(priv, _cpsw_adjust_link, priv, &link);
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_pauseparam = cpsw_get_pauseparam,
+ .set_pauseparam = cpsw_set_pauseparam,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
+ .get_regs_len = cpsw_get_regs_len,
+ .get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
+ .get_link_ksettings = cpsw_get_link_ksettings,
+ .set_link_ksettings = cpsw_set_link_ksettings,
+ .get_eee = cpsw_get_eee,
+ .set_eee = cpsw_set_eee,
+ .nway_reset = cpsw_nway_reset,
+ .get_ringparam = cpsw_get_ringparam,
+ .set_ringparam = cpsw_set_ringparam,
+};
+
+static int cpsw_probe_dt(struct cpsw_platform_data *data,
+ struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *slave_node;
+ int i = 0, ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ if (of_property_read_u32(node, "slaves", &prop)) {
+ dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
+ return -EINVAL;
+ }
+ data->slaves = prop;
+
+ if (of_property_read_u32(node, "active_slave", &prop)) {
+ dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
+ return -EINVAL;
+ }
+ data->active_slave = prop;
+
+ data->slave_data = devm_kcalloc(&pdev->dev,
+ data->slaves,
+ sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data)
+ return -ENOMEM;
+
+ if (of_property_read_u32(node, "cpdma_channels", &prop)) {
+ dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
+ return -EINVAL;
+ }
+ data->channels = prop;
+
+ if (of_property_read_u32(node, "bd_ram_size", &prop)) {
+ dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
+ return -EINVAL;
+ }
+ data->bd_ram_size = prop;
+
+ if (of_property_read_u32(node, "mac_control", &prop)) {
+ dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
+ return -EINVAL;
+ }
+ data->mac_control = prop;
+
+ if (of_property_read_bool(node, "dual_emac"))
+ data->dual_emac = true;
+
+ /*
+ * Populate all the child nodes here...
+ */
+ ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
+	/* We do not want to force this, as in some cases there may be no child nodes */
+ if (ret)
+ dev_warn(&pdev->dev, "Doesn't have any child node\n");
+
+ for_each_available_child_of_node(node, slave_node) {
+ struct cpsw_slave_data *slave_data = data->slave_data + i;
+ int lenp;
+ const __be32 *parp;
+
+		/* This is not a slave child node, continue */
+ if (!of_node_name_eq(slave_node, "slave"))
+ continue;
+
+ slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
+ NULL);
+ if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
+ IS_ERR(slave_data->ifphy)) {
+ ret = PTR_ERR(slave_data->ifphy);
+ dev_err(&pdev->dev,
+ "%d: Error retrieving port phy: %d\n", i, ret);
+ goto err_node_put;
+ }
+
+ slave_data->slave_node = slave_node;
+ slave_data->phy_node = of_parse_phandle(slave_node,
+ "phy-handle", 0);
+ parp = of_get_property(slave_node, "phy_id", &lenp);
+ if (slave_data->phy_node) {
+ dev_dbg(&pdev->dev,
+ "slave[%d] using phy-handle=\"%pOF\"\n",
+ i, slave_data->phy_node);
+ } else if (of_phy_is_fixed_link(slave_node)) {
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ ret = of_phy_register_fixed_link(slave_node);
+ if (ret) {
+ dev_err_probe(&pdev->dev, ret, "failed to register fixed-link phy\n");
+ goto err_node_put;
+ }
+ slave_data->phy_node = of_node_get(slave_node);
+ } else if (parp) {
+ u32 phyid;
+ struct device_node *mdio_node;
+ struct platform_device *mdio;
+
+ if (lenp != (sizeof(__be32) * 2)) {
+ dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
+ goto no_phy_slave;
+ }
+ mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
+ phyid = be32_to_cpup(parp+1);
+ mdio = of_find_device_by_node(mdio_node);
+ of_node_put(mdio_node);
+ if (!mdio) {
+ dev_err(&pdev->dev, "Missing mdio platform device\n");
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ put_device(&mdio->dev);
+ } else {
+ dev_err(&pdev->dev,
+ "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
+ i);
+ goto no_phy_slave;
+ }
+ ret = of_get_phy_mode(slave_node, &slave_data->phy_if);
+ if (ret) {
+ dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+ i);
+ goto err_node_put;
+ }
+
+no_phy_slave:
+ ret = of_get_mac_address(slave_node, slave_data->mac_addr);
+ if (ret) {
+ ret = ti_cm_get_macid(&pdev->dev, i,
+ slave_data->mac_addr);
+ if (ret)
+ goto err_node_put;
+ }
+ if (data->dual_emac) {
+ if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
+ &prop)) {
+ dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
+ slave_data->dual_emac_res_vlan = i+1;
+ dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
+ i++;
+ if (i == data->slaves) {
+ ret = 0;
+ goto err_node_put;
+ }
+ }
+
+ return 0;
+
+err_node_put:
+ of_node_put(slave_node);
+ return ret;
+}
+
+static void cpsw_remove_dt(struct platform_device *pdev)
+{
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *slave_node;
+ int i = 0;
+
+ for_each_available_child_of_node(node, slave_node) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (!of_node_name_eq(slave_node, "slave"))
+ continue;
+
+ if (of_phy_is_fixed_link(slave_node))
+ of_phy_deregister_fixed_link(slave_node);
+
+ of_node_put(slave_data->phy_node);
+
+ i++;
+ if (i == data->slaves) {
+ of_node_put(slave_node);
+ break;
+ }
+ }
+
+ of_platform_depopulate(&pdev->dev);
+}
+
+static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv_sl2;
+ int ret = 0;
+
+ ndev = devm_alloc_etherdev_mqs(cpsw->dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv_sl2 = netdev_priv(ndev);
+ priv_sl2->cpsw = cpsw;
+ priv_sl2->ndev = ndev;
+ priv_sl2->dev = &ndev->dev;
+ priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+
+ if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
+ memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
+ ETH_ALEN);
+ dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
+ priv_sl2->mac_addr);
+ } else {
+ eth_random_addr(priv_sl2->mac_addr);
+ dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
+ priv_sl2->mac_addr);
+ }
+ eth_hw_addr_set(ndev, priv_sl2->mac_addr);
+
+ priv_sl2->emac_port = 1;
+ cpsw->slaves[1].ndev = ndev;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, cpsw->dev);
+ ndev->dev.of_node = cpsw->slaves[1].data->slave_node;
+ ret = register_netdev(ndev);
+ if (ret)
+ dev_err(cpsw->dev, "cpsw: error registering net device\n");
+
+ return ret;
+}
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw"},
+ { .compatible = "ti,am335x-cpsw"},
+ { .compatible = "ti,am4372-cpsw"},
+ { .compatible = "ti,dra7-cpsw"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
+
+static int cpsw_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ struct cpsw_platform_data *data;
+ struct net_device *ndev;
+ struct cpsw_priv *priv;
+ void __iomem *ss_regs;
+ struct resource *ss_res;
+ struct gpio_descs *mode;
+ const struct soc_device_attribute *soc;
+ struct cpsw_common *cpsw;
+ int ret = 0, ch;
+ int irq;
+
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, cpsw);
+ cpsw_slave_index = cpsw_slave_index_priv;
+
+ cpsw->dev = dev;
+
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck is not found %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
+ if (IS_ERR(ss_regs))
+ return PTR_ERR(ss_regs);
+ cpsw->regs = ss_regs;
+
+ cpsw->wr_regs = devm_platform_ioremap_resource(pdev, 1);
+ if (IS_ERR(cpsw->wr_regs))
+ return PTR_ERR(cpsw->wr_regs);
+
+ /* RX IRQ */
+ irq = platform_get_irq(pdev, 1);
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ /* TX IRQ */
+ irq = platform_get_irq(pdev, 2);
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
+	/* Misc IRQ */
+ irq = platform_get_irq(pdev, 3);
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
+ /*
+ * This may be required here for child devices.
+ */
+ pm_runtime_enable(dev);
+
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0)
+ goto clean_runtime_disable_ret;
+
+ ret = cpsw_probe_dt(&cpsw->data, pdev);
+ if (ret)
+ goto clean_dt_ret;
+
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = true;
+
+ data = &cpsw->data;
+ cpsw->slaves = devm_kcalloc(dev,
+ data->slaves, sizeof(struct cpsw_slave),
+ GFP_KERNEL);
+ if (!cpsw->slaves) {
+ ret = -ENOMEM;
+ goto clean_dt_ret;
+ }
+
+ cpsw->rx_packet_max = max(rx_packet_max, CPSW_MAX_PACKET_SIZE);
+ cpsw->descs_pool_size = descs_pool_size;
+
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
+ goto clean_dt_ret;
+
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_cpts;
+ }
+
+ cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
+ goto clean_cpts;
+ }
+ cpsw_split_res(cpsw);
+
+ /* setup netdev */
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES, CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ ret = -ENOMEM;
+ goto clean_cpts;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = 0;
+
+ if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
+ memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
+ dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
+ } else {
+ eth_random_addr(priv->mac_addr);
+ dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
+ }
+
+ eth_hw_addr_set(ndev, priv->mac_addr);
+
+ cpsw->slaves[0].ndev = ndev;
+
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
+ netif_napi_add_tx(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll);
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, dev);
+ ndev->dev.of_node = cpsw->slaves[0].data->slave_node;
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(dev, "error registering net device\n");
+ ret = -ENODEV;
+ goto clean_cpts;
+ }
+
+ if (cpsw->data.dual_emac) {
+ ret = cpsw_probe_dual_emac(priv);
+ if (ret) {
+ cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+ goto clean_unregister_netdev_ret;
+ }
+ }
+
+ /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+ * MISC IRQs which are always kept disabled with this driver so
+ * we will not request them.
+ *
+ * If anyone wants to implement support for those, make sure to
+ * first request and append them to irqs_table array.
+ */
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
+ }
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
+ }
+
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(&pdev->dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev_ret;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+
+skip_cpts:
+ cpsw_notice(priv, probe,
+ "initialized device (regs %pa, irq %d, pool size %d)\n",
+ &ss_res->start, cpsw->irqs_table[0], descs_pool_size);
+
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+clean_unregister_netdev_ret:
+ unregister_netdev(ndev);
+clean_cpts:
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(pdev);
+ pm_runtime_put_sync(&pdev->dev);
+clean_runtime_disable_ret:
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int cpsw_remove(struct platform_device *pdev)
+{
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int i, ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
+ unregister_netdev(cpsw->slaves[i].ndev);
+
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ cpsw_remove_dt(pdev);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int cpsw_suspend(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
+ if (netif_running(cpsw->slaves[i].ndev))
+ cpsw_ndo_stop(cpsw->slaves[i].ndev);
+
+ rtnl_unlock();
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int cpsw_resume(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev)
+ if (netif_running(cpsw->slaves[i].ndev))
+ cpsw_ndo_open(cpsw->slaves[i].ndev);
+
+ rtnl_unlock();
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
+
+static struct platform_driver cpsw_driver = {
+ .driver = {
+ .name = "cpsw",
+ .pm = &cpsw_pm_ops,
+ .of_match_table = cpsw_of_mtable,
+ },
+ .probe = cpsw_probe,
+ .remove = cpsw_remove,
+};
+
+module_platform_driver(cpsw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
+MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
+MODULE_DESCRIPTION("TI CPSW Ethernet driver");
diff --git a/drivers/net/ethernet/ti/cpsw.h b/drivers/net/ethernet/ti/cpsw.h
new file mode 100644
index 000000000..35d602f03
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2013 Texas Instruments
+ *
+ */
+#ifndef __CPSW_H__
+#define __CPSW_H__
+
+#include <linux/if_ether.h>
+#include <linux/phy.h>
+
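+/* Pack an Ethernet address into the two 32-bit MAC address register halves,
+ * first address byte in the least significant byte.
+ */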
+#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
+ ((mac)[2] << 16) | ((mac)[3] << 24))
+#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
+
+#if IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL)
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave);
+#else
+static inline
+void cpsw_phy_sel(struct device *dev, phy_interface_t phy_mode, int slave)
+{}
+#endif
+int ti_cm_get_macid(struct device *dev, int slave, u8 *mac_addr);
+
+#endif /* __CPSW_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
new file mode 100644
index 000000000..2647c18d4
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -0,0 +1,1472 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ */
+#include <linux/bitmap.h>
+#include <linux/if_vlan.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/stat.h>
+#include <linux/sysfs.h>
+#include <linux/etherdevice.h>
+
+#include "cpsw_ale.h"
+
+#define BITMASK(bits) (BIT(bits) - 1)
+
+#define ALE_VERSION_MAJOR(rev, mask) (((rev) >> 8) & (mask))
+#define ALE_VERSION_MINOR(rev) (rev & 0xff)
+#define ALE_VERSION_1R3 0x0103
+#define ALE_VERSION_1R4 0x0104
+
+/* ALE Registers */
+#define ALE_IDVER 0x00
+#define ALE_STATUS 0x04
+#define ALE_CONTROL 0x08
+#define ALE_PRESCALE 0x10
+#define ALE_AGING_TIMER 0x14
+#define ALE_UNKNOWNVLAN 0x18
+#define ALE_TABLE_CONTROL 0x20
+#define ALE_TABLE 0x34
+#define ALE_PORTCTL 0x40
+
+/* ALE NetCP NU switch specific Registers */
+#define ALE_UNKNOWNVLAN_MEMBER 0x90
+#define ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD 0x94
+#define ALE_UNKNOWNVLAN_REG_MCAST_FLOOD 0x98
+#define ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS 0x9C
+#define ALE_VLAN_MASK_MUX(reg) (0xc0 + (0x4 * (reg)))
+
+#define AM65_CPSW_ALE_THREAD_DEF_REG 0x134
+
+/* ALE_AGING_TIMER */
+#define ALE_AGING_TIMER_MASK GENMASK(23, 0)
+
+#define ALE_RATE_LIMIT_MIN_PPS 1000
+
+/**
+ * struct ale_entry_fld - The ALE tbl entry field description
+ * @start_bit: field start bit
+ * @num_bits: field bit length
+ * @flags: field flags
+ */
+struct ale_entry_fld {
+ u8 start_bit;
+ u8 num_bits;
+ u8 flags;
+};
+
+enum {
+ CPSW_ALE_F_STATUS_REG = BIT(0), /* Status register present */
+ CPSW_ALE_F_HW_AUTOAGING = BIT(1), /* HW auto aging */
+
+ CPSW_ALE_F_COUNT
+};
+
+/**
+ * struct cpsw_ale_dev_id - The ALE version/SoC specific configuration
+ * @dev_id: ALE version/SoC id
+ * @features: features supported by ALE
+ * @tbl_entries: number of ALE entries
+ * @major_ver_mask: mask of ALE Major Version Value in ALE_IDVER reg.
+ * @nu_switch_ale: NU Switch ALE
+ * @vlan_entry_tbl: ALE vlan entry fields description tbl
+ */
+struct cpsw_ale_dev_id {
+ const char *dev_id;
+ u32 features;
+ u32 tbl_entries;
+ u32 major_ver_mask;
+ bool nu_switch_ale;
+ const struct ale_entry_fld *vlan_entry_tbl;
+};
+
+#define ALE_TABLE_WRITE BIT(31)
+
+#define ALE_TYPE_FREE 0
+#define ALE_TYPE_ADDR 1
+#define ALE_TYPE_VLAN 2
+#define ALE_TYPE_VLAN_ADDR 3
+
+#define ALE_UCAST_PERSISTANT 0
+#define ALE_UCAST_UNTOUCHED 1
+#define ALE_UCAST_OUI 2
+#define ALE_UCAST_TOUCHED 3
+
+#define ALE_TABLE_SIZE_MULTIPLIER 1024
+#define ALE_STATUS_SIZE_MASK 0x1f
+
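+/* ALE table entries are read into ale_entry[] with word 0 holding the
+ * most significant bits; the "2 - idx" flip below converts a plain word
+ * index into that reversed word order.
+ */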
+static inline int cpsw_ale_get_field(u32 *ale_entry, u32 start, u32 bits)
+{
+ int idx, idx2;
+ u32 hi_val = 0;
+
+ idx = start / 32;
+ idx2 = (start + bits - 1) / 32;
+ /* Check if bits to be fetched exceed a word */
+ if (idx != idx2) {
+ idx2 = 2 - idx2; /* flip */
+ hi_val = ale_entry[idx2] << ((idx2 * 32) - start);
+ }
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ return (hi_val + (ale_entry[idx] >> start)) & BITMASK(bits);
+}
+
+static inline void cpsw_ale_set_field(u32 *ale_entry, u32 start, u32 bits,
+ u32 value)
+{
+ int idx, idx2;
+
+ value &= BITMASK(bits);
+ idx = start / 32;
+ idx2 = (start + bits - 1) / 32;
+ /* Check if bits to be set exceed a word */
+ if (idx != idx2) {
+ idx2 = 2 - idx2; /* flip */
+ ale_entry[idx2] &= ~(BITMASK(bits + start - (idx2 * 32)));
+ ale_entry[idx2] |= (value >> ((idx2 * 32) - start));
+ }
+ start -= idx * 32;
+ idx = 2 - idx; /* flip */
+ ale_entry[idx] &= ~(BITMASK(bits) << start);
+ ale_entry[idx] |= (value << start);
+}
+
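+/* DEFINE_ALE_FIELD generates accessors for fields with a fixed width, while
+ * DEFINE_ALE_FIELD1 generates accessors that take the width at run time for
+ * fields (port mask/number) whose size depends on the number of ALE ports.
+ */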
+#define DEFINE_ALE_FIELD(name, start, bits) \
+static inline int cpsw_ale_get_##name(u32 *ale_entry) \
+{ \
+ return cpsw_ale_get_field(ale_entry, start, bits); \
+} \
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value) \
+{ \
+ cpsw_ale_set_field(ale_entry, start, bits, value); \
+}
+
+#define DEFINE_ALE_FIELD1(name, start) \
+static inline int cpsw_ale_get_##name(u32 *ale_entry, u32 bits) \
+{ \
+ return cpsw_ale_get_field(ale_entry, start, bits); \
+} \
+static inline void cpsw_ale_set_##name(u32 *ale_entry, u32 value, \
+ u32 bits) \
+{ \
+ cpsw_ale_set_field(ale_entry, start, bits, value); \
+}
+
+enum {
+ ALE_ENT_VID_MEMBER_LIST = 0,
+ ALE_ENT_VID_UNREG_MCAST_MSK,
+ ALE_ENT_VID_REG_MCAST_MSK,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK,
+ ALE_ENT_VID_UNREG_MCAST_IDX,
+ ALE_ENT_VID_REG_MCAST_IDX,
+ ALE_ENT_VID_LAST,
+};
+
+#define ALE_FLD_ALLOWED BIT(0)
+#define ALE_FLD_SIZE_PORT_MASK_BITS BIT(1)
+#define ALE_FLD_SIZE_PORT_NUM_BITS BIT(2)
+
+#define ALE_ENTRY_FLD(id, start, bits) \
+[id] = { \
+ .start_bit = start, \
+ .num_bits = bits, \
+ .flags = ALE_FLD_ALLOWED, \
+}
+
+#define ALE_ENTRY_FLD_DYN_MSK_SIZE(id, start) \
+[id] = { \
+ .start_bit = start, \
+ .num_bits = 0, \
+ .flags = ALE_FLD_ALLOWED | \
+ ALE_FLD_SIZE_PORT_MASK_BITS, \
+}
+
+/* dm814x, am3/am4/am5, k2hk */
+static const struct ale_entry_fld vlan_entry_cpsw[ALE_ENT_VID_LAST] = {
+ ALE_ENTRY_FLD(ALE_ENT_VID_MEMBER_LIST, 0, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_UNREG_MCAST_MSK, 8, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_REG_MCAST_MSK, 16, 3),
+ ALE_ENTRY_FLD(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24, 3),
+};
+
+/* k2e/k2l, k3 am65/j721e cpsw2g */
+static const struct ale_entry_fld vlan_entry_nu[ALE_ENT_VID_LAST] = {
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_MEMBER_LIST, 0),
+ ALE_ENTRY_FLD(ALE_ENT_VID_UNREG_MCAST_IDX, 20, 3),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24),
+ ALE_ENTRY_FLD(ALE_ENT_VID_REG_MCAST_IDX, 44, 3),
+};
+
+/* K3 j721e/j7200 cpsw9g/5g, am64x cpsw3g */
+static const struct ale_entry_fld vlan_entry_k3_cpswxg[] = {
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_MEMBER_LIST, 0),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_UNREG_MCAST_MSK, 12),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_FORCE_UNTAGGED_MSK, 24),
+ ALE_ENTRY_FLD_DYN_MSK_SIZE(ALE_ENT_VID_REG_MCAST_MSK, 36),
+};
+
+DEFINE_ALE_FIELD(entry_type, 60, 2)
+DEFINE_ALE_FIELD(vlan_id, 48, 12)
+DEFINE_ALE_FIELD(mcast_state, 62, 2)
+DEFINE_ALE_FIELD1(port_mask, 66)
+DEFINE_ALE_FIELD(super, 65, 1)
+DEFINE_ALE_FIELD(ucast_type, 62, 2)
+DEFINE_ALE_FIELD1(port_num, 66)
+DEFINE_ALE_FIELD(blocked, 65, 1)
+DEFINE_ALE_FIELD(secure, 64, 1)
+DEFINE_ALE_FIELD(mcast, 40, 1)
+
+#define NU_VLAN_UNREG_MCAST_IDX 1
+
+static int cpsw_ale_entry_get_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ const struct ale_entry_fld *entry_tbl,
+ int fld_id)
+{
+ const struct ale_entry_fld *entry_fld;
+ u32 bits;
+
+ if (!ale || !ale_entry)
+ return -EINVAL;
+
+ entry_fld = &entry_tbl[fld_id];
+ if (!(entry_fld->flags & ALE_FLD_ALLOWED)) {
+ dev_err(ale->params.dev, "get: wrong ale fld id %d\n", fld_id);
+ return -ENOENT;
+ }
+
+ bits = entry_fld->num_bits;
+ if (entry_fld->flags & ALE_FLD_SIZE_PORT_MASK_BITS)
+ bits = ale->port_mask_bits;
+
+ return cpsw_ale_get_field(ale_entry, entry_fld->start_bit, bits);
+}
+
+static void cpsw_ale_entry_set_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ const struct ale_entry_fld *entry_tbl,
+ int fld_id,
+ u32 value)
+{
+ const struct ale_entry_fld *entry_fld;
+ u32 bits;
+
+ if (!ale || !ale_entry)
+ return;
+
+ entry_fld = &entry_tbl[fld_id];
+ if (!(entry_fld->flags & ALE_FLD_ALLOWED)) {
+ dev_err(ale->params.dev, "set: wrong ale fld id %d\n", fld_id);
+ return;
+ }
+
+ bits = entry_fld->num_bits;
+ if (entry_fld->flags & ALE_FLD_SIZE_PORT_MASK_BITS)
+ bits = ale->port_mask_bits;
+
+ cpsw_ale_set_field(ale_entry, entry_fld->start_bit, bits, value);
+}
+
+static int cpsw_ale_vlan_get_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ int fld_id)
+{
+ return cpsw_ale_entry_get_fld(ale, ale_entry,
+ ale->vlan_entry_tbl, fld_id);
+}
+
+static void cpsw_ale_vlan_set_fld(struct cpsw_ale *ale,
+ u32 *ale_entry,
+ int fld_id,
+ u32 value)
+{
+ cpsw_ale_entry_set_fld(ale, ale_entry,
+ ale->vlan_entry_tbl, fld_id, value);
+}
+
+/* The MAC address field in the ALE entry cannot be macroized as above */
+static inline void cpsw_ale_get_addr(u32 *ale_entry, u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ addr[i] = cpsw_ale_get_field(ale_entry, 40 - 8*i, 8);
+}
+
+static inline void cpsw_ale_set_addr(u32 *ale_entry, const u8 *addr)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ cpsw_ale_set_field(ale_entry, 40 - 8*i, 8, addr[i]);
+}
+
+static int cpsw_ale_read(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+{
+ int i;
+
+ WARN_ON(idx > ale->params.ale_entries);
+
+ writel_relaxed(idx, ale->params.ale_regs + ALE_TABLE_CONTROL);
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ ale_entry[i] = readl_relaxed(ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
+
+ return idx;
+}
+
+static int cpsw_ale_write(struct cpsw_ale *ale, int idx, u32 *ale_entry)
+{
+ int i;
+
+ WARN_ON(idx > ale->params.ale_entries);
+
+ for (i = 0; i < ALE_ENTRY_WORDS; i++)
+ writel_relaxed(ale_entry[i], ale->params.ale_regs +
+ ALE_TABLE + 4 * i);
+
+ writel_relaxed(idx | ALE_TABLE_WRITE, ale->params.ale_regs +
+ ALE_TABLE_CONTROL);
+
+ return idx;
+}
+
+static int cpsw_ale_match_addr(struct cpsw_ale *ale, const u8 *addr, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ u8 entry_addr[6];
+
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
+ cpsw_ale_get_addr(ale_entry, entry_addr);
+ if (ether_addr_equal(entry_addr, addr))
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_match_vlan(struct cpsw_ale *ale, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+ if (cpsw_ale_get_vlan_id(ale_entry) == vid)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_match_free(struct cpsw_ale *ale)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type == ALE_TYPE_FREE)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static int cpsw_ale_find_ageable(struct cpsw_ale *ale)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_ADDR && type != ALE_TYPE_VLAN_ADDR)
+ continue;
+ if (cpsw_ale_get_mcast(ale_entry))
+ continue;
+ type = cpsw_ale_get_ucast_type(ale_entry);
+ if (type != ALE_UCAST_PERSISTANT &&
+ type != ALE_UCAST_OUI)
+ return idx;
+ }
+ return -ENOENT;
+}
+
+static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ int port_mask)
+{
+ int mask;
+
+ mask = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
+ if ((mask & port_mask) == 0)
+ return; /* ports don't intersect, not interested */
+ mask &= ~port_mask;
+
+ /* free if only remaining port is host port */
+ if (mask)
+ cpsw_ale_set_port_mask(ale_entry, mask,
+ ale->port_mask_bits);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+}
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int ret, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ ret = cpsw_ale_get_entry_type(ale_entry);
+ if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
+ continue;
+
+ /* If the vid passed is -1, remove all multicast entries from
+ * the table irrespective of the vlan id. If a valid vlan id is
+ * passed, remove only multicast entries added to that vlan id;
+ * if the vlan id doesn't match, move on to the next entry.
+ */
+ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
+
+ if (cpsw_ale_get_mcast(ale_entry)) {
+ u8 addr[6];
+
+ if (cpsw_ale_get_super(ale_entry))
+ continue;
+
+ cpsw_ale_get_addr(ale_entry, addr);
+ if (!is_broadcast_ether_addr(addr))
+ cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
+ }
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+ return 0;
+}
+
+static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
+ int flags, u16 vid)
+{
+ if (flags & ALE_VLAN) {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN_ADDR);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+ } else {
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_ADDR);
+ }
+}
+
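+/* The add helpers below pick a table index in three steps: reuse an existing
+ * entry for the same address/vlan if one is found, otherwise take the first
+ * free entry, and as a last resort recycle an ageable (non-persistent,
+ * non-OUI) unicast entry found by cpsw_ale_find_ageable().
+ */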
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
+ int flags, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_ucast_type(ale_entry, ALE_UCAST_PERSISTANT);
+ cpsw_ale_set_secure(ale_entry, (flags & ALE_SECURE) ? 1 : 0);
+ cpsw_ale_set_blocked(ale_entry, (flags & ALE_BLOCKED) ? 1 : 0);
+ cpsw_ale_set_port_num(ale_entry, port, ale->port_num_bits);
+
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
+ int flags, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
+ int flags, u16 vid, int mcast_state)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx, mask;
+
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_vlan_entry_type(ale_entry, flags, vid);
+
+ cpsw_ale_set_addr(ale_entry, addr);
+ cpsw_ale_set_super(ale_entry, (flags & ALE_SUPER) ? 1 : 0);
+ cpsw_ale_set_mcast_state(ale_entry, mcast_state);
+
+ mask = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
+ port_mask |= mask;
+ cpsw_ale_set_port_mask(ale_entry, port_mask,
+ ale->port_mask_bits);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
+ int flags, u16 vid)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int mcast_members = 0;
+ int idx;
+
+ idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ if (port_mask) {
+ mcast_members = cpsw_ale_get_port_mask(ale_entry,
+ ale->port_mask_bits);
+ mcast_members &= ~port_mask;
+ }
+
+ if (mcast_members)
+ cpsw_ale_set_port_mask(ale_entry, mcast_members,
+ ale->port_mask_bits);
+ else
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
+/* ALE NetCP NU switch specific vlan functions */
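+/* On NU-type ALEs the per-VLAN registered/unregistered multicast flood masks
+ * are not stored in the VLAN entry itself; the entry only carries indexes
+ * into the ALE_VLAN_MASK_MUX register bank, which holds the actual masks.
+ */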
+static void cpsw_ale_set_vlan_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ int reg_mcast, int unreg_mcast)
+{
+ int idx;
+
+ /* Set VLAN registered multicast flood mask */
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_IDX);
+ writel(reg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+
+ /* Set VLAN unregistered multicast flood mask */
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX);
+ writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+}
+
+static void cpsw_ale_set_vlan_untag(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int untag_mask)
+{
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK,
+ untag_mask);
+ if (untag_mask & ALE_PORT_HOST)
+ bitmap_set(ale->p0_untag_vid_mask, vid, 1);
+ else
+ bitmap_clear(ale->p0_untag_vid_mask, vid, 1);
+}
+
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port_mask, int untag,
+ int reg_mcast, int unreg_mcast)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_VLAN);
+ cpsw_ale_set_vlan_id(ale_entry, vid);
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
+
+ if (!ale->params.nu_switch_ale) {
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK, reg_mcast);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
+ } else {
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX,
+ NU_VLAN_UNREG_MCAST_IDX);
+ cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast, unreg_mcast);
+ }
+
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST, port_mask);
+
+ if (idx < 0)
+ idx = cpsw_ale_match_free(ale);
+ if (idx < 0)
+ idx = cpsw_ale_find_ageable(ale);
+ if (idx < 0)
+ return -ENOMEM;
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ return 0;
+}
+
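+/* Remove @port_mask ports from the VLAN entry: free the entry if no member
+ * ports remain, otherwise trim the untagged and multicast flood masks down
+ * to the remaining members.
+ */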
+static void cpsw_ale_vlan_del_modify_int(struct cpsw_ale *ale, u32 *ale_entry,
+ u16 vid, int port_mask)
+{
+ int reg_mcast, unreg_mcast;
+ int members, untag;
+
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
+ members &= ~port_mask;
+ if (!members) {
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ return;
+ }
+
+ untag = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK);
+ reg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK);
+ unreg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
+ untag &= members;
+ reg_mcast &= members;
+ unreg_mcast &= members;
+
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, untag);
+
+ if (!ale->params.nu_switch_ale) {
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK, reg_mcast);
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
+ } else {
+ cpsw_ale_set_vlan_mcast(ale, ale_entry, reg_mcast,
+ unreg_mcast);
+ }
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST, members);
+}
+
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
+ cpsw_ale_write(ale, idx, ale_entry);
+
+ return 0;
+}
+
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int members, idx;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx < 0)
+ return -ENOENT;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ /* If !port_mask, force-remove the VLAN (legacy behaviour).
+ * Otherwise check whether other ports are still VLAN members:
+ * if not, remove the VLAN entry.
+ * If so, the same VLAN was added to more than one port in multi-port
+ * mode, so remove only the port_mask ports from the VLAN ALE entry,
+ * excluding the Host port.
+ */
+ members = cpsw_ale_vlan_get_fld(ale, ale_entry, ALE_ENT_VID_MEMBER_LIST);
+ members &= ~port_mask;
+
+ if (!port_mask || !members) {
+ /* last port or force remove - remove VLAN */
+ cpsw_ale_set_vlan_untag(ale, ale_entry, vid, 0);
+ cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
+ } else {
+ port_mask &= ~ALE_PORT_HOST;
+ cpsw_ale_vlan_del_modify_int(ale, ale_entry, vid, port_mask);
+ }
+
+ cpsw_ale_write(ale, idx, ale_entry);
+
+ return 0;
+}
+
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mask, int unreg_mask)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS] = {0, 0, 0};
+ int reg_mcast_members, unreg_mcast_members;
+ int vlan_members, untag_members;
+ int idx, ret = 0;
+
+ idx = cpsw_ale_match_vlan(ale, vid);
+ if (idx >= 0)
+ cpsw_ale_read(ale, idx, ale_entry);
+
+ vlan_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
+ reg_mcast_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_REG_MCAST_MSK);
+ unreg_mcast_members =
+ cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
+ untag_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_FORCE_UNTAGGED_MSK);
+
+ vlan_members |= port_mask;
+ untag_members = (untag_members & ~port_mask) | untag_mask;
+ reg_mcast_members = (reg_mcast_members & ~port_mask) | reg_mask;
+ unreg_mcast_members = (unreg_mcast_members & ~port_mask) | unreg_mask;
+
+ ret = cpsw_ale_add_vlan(ale, vid, vlan_members, untag_members,
+ reg_mcast_members, unreg_mcast_members);
+ if (ret) {
+ dev_err(ale->params.dev, "Unable to add vlan\n");
+ return ret;
+ }
+ dev_dbg(ale->params.dev, "port mask 0x%x untag 0x%x\n", vlan_members,
+ untag_mask);
+
+ return ret;
+}
+
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int unreg_members = 0;
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+
+ unreg_members =
+ cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
+ if (add)
+ unreg_members |= unreg_mcast_mask;
+ else
+ unreg_members &= ~unreg_mcast_mask;
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK,
+ unreg_members);
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+}
+
+static void cpsw_ale_vlan_set_unreg_mcast(struct cpsw_ale *ale, u32 *ale_entry,
+ int allmulti)
+{
+ int unreg_mcast;
+
+ unreg_mcast = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK);
+ if (allmulti)
+ unreg_mcast |= ALE_PORT_HOST;
+ else
+ unreg_mcast &= ~ALE_PORT_HOST;
+
+ cpsw_ale_vlan_set_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_MSK, unreg_mcast);
+}
+
+static void
+cpsw_ale_vlan_set_unreg_mcast_idx(struct cpsw_ale *ale, u32 *ale_entry,
+ int allmulti)
+{
+ int unreg_mcast;
+ int idx;
+
+ idx = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_UNREG_MCAST_IDX);
+
+ unreg_mcast = readl(ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+
+ if (allmulti)
+ unreg_mcast |= ALE_PORT_HOST;
+ else
+ unreg_mcast &= ~ALE_PORT_HOST;
+
+ writel(unreg_mcast, ale->params.ale_regs + ALE_VLAN_MASK_MUX(idx));
+}
+
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port)
+{
+ u32 ale_entry[ALE_ENTRY_WORDS];
+ int type, idx;
+
+ for (idx = 0; idx < ale->params.ale_entries; idx++) {
+ int vlan_members;
+
+ cpsw_ale_read(ale, idx, ale_entry);
+ type = cpsw_ale_get_entry_type(ale_entry);
+ if (type != ALE_TYPE_VLAN)
+ continue;
+
+ vlan_members = cpsw_ale_vlan_get_fld(ale, ale_entry,
+ ALE_ENT_VID_MEMBER_LIST);
+
+ if (port != -1 && !(vlan_members & BIT(port)))
+ continue;
+
+ if (!ale->params.nu_switch_ale)
+ cpsw_ale_vlan_set_unreg_mcast(ale, ale_entry, allmulti);
+ else
+ cpsw_ale_vlan_set_unreg_mcast_idx(ale, ale_entry,
+ allmulti);
+
+ cpsw_ale_write(ale, idx, ale_entry);
+ }
+}
+
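+/* Describes where each ALE control field lives. Global controls have a zero
+ * port_offset/port_shift; per-port controls are located at
+ * offset + port * port_offset with the field shifted by
+ * shift + port * port_shift.
+ */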
+struct ale_control_info {
+ const char *name;
+ int offset, port_offset;
+ int shift, port_shift;
+ int bits;
+};
+
+static struct ale_control_info ale_controls[ALE_NUM_CONTROLS] = {
+ [ALE_ENABLE] = {
+ .name = "enable",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 31,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_CLEAR] = {
+ .name = "clear",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 30,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_AGEOUT] = {
+ .name = "ageout",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 29,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_P0_UNI_FLOOD] = {
+ .name = "port0_unicast_flood",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 8,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_VLAN_NOLEARN] = {
+ .name = "vlan_nolearn",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 7,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_NO_PORT_VLAN] = {
+ .name = "no_port_vlan",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 6,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_OUI_DENY] = {
+ .name = "oui_deny",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 5,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_BYPASS] = {
+ .name = "bypass",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 4,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_RATE_LIMIT_TX] = {
+ .name = "rate_limit_tx",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 3,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_VLAN_AWARE] = {
+ .name = "vlan_aware",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 2,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_AUTH_ENABLE] = {
+ .name = "auth_enable",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 1,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_RATE_LIMIT] = {
+ .name = "rate_limit",
+ .offset = ALE_CONTROL,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_STATE] = {
+ .name = "port_state",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 2,
+ },
+ [ALE_PORT_DROP_UNTAGGED] = {
+ .name = "drop_untagged",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 2,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_DROP_UNKNOWN_VLAN] = {
+ .name = "drop_unknown",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 3,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_NOLEARN] = {
+ .name = "nolearn",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 4,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_NO_SA_UPDATE] = {
+ .name = "no_source_update",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 5,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_MACONLY] = {
+ .name = "mac_only_port_mode",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 11,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_MACONLY_CAF] = {
+ .name = "mac_only_port_caf",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 13,
+ .port_shift = 0,
+ .bits = 1,
+ },
+ [ALE_PORT_MCAST_LIMIT] = {
+ .name = "mcast_limit",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 16,
+ .port_shift = 0,
+ .bits = 8,
+ },
+ [ALE_PORT_BCAST_LIMIT] = {
+ .name = "bcast_limit",
+ .offset = ALE_PORTCTL,
+ .port_offset = 4,
+ .shift = 24,
+ .port_shift = 0,
+ .bits = 8,
+ },
+ [ALE_PORT_UNKNOWN_VLAN_MEMBER] = {
+ .name = "unknown_vlan_member",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_PORT_UNKNOWN_MCAST_FLOOD] = {
+ .name = "unknown_mcast_flood",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 8,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_PORT_UNKNOWN_REG_MCAST_FLOOD] = {
+ .name = "unknown_reg_flood",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 16,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_PORT_UNTAGGED_EGRESS] = {
+ .name = "untagged_egress",
+ .offset = ALE_UNKNOWNVLAN,
+ .port_offset = 0,
+ .shift = 24,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_DEFAULT_THREAD_ID] = {
+ .name = "default_thread_id",
+ .offset = AM65_CPSW_ALE_THREAD_DEF_REG,
+ .port_offset = 0,
+ .shift = 0,
+ .port_shift = 0,
+ .bits = 6,
+ },
+ [ALE_DEFAULT_THREAD_ENABLE] = {
+ .name = "default_thread_id_enable",
+ .offset = AM65_CPSW_ALE_THREAD_DEF_REG,
+ .port_offset = 0,
+ .shift = 15,
+ .port_shift = 0,
+ .bits = 1,
+ },
+};
+
+int cpsw_ale_control_set(struct cpsw_ale *ale, int port, int control,
+ int value)
+{
+ const struct ale_control_info *info;
+ int offset, shift;
+ u32 tmp, mask;
+
+ if (control < 0 || control >= ARRAY_SIZE(ale_controls))
+ return -EINVAL;
+
+ info = &ale_controls[control];
+ if (info->port_offset == 0 && info->port_shift == 0)
+ port = 0; /* global, port is a don't care */
+
+ if (port < 0 || port >= ale->params.ale_ports)
+ return -EINVAL;
+
+ mask = BITMASK(info->bits);
+ if (value & ~mask)
+ return -EINVAL;
+
+ offset = info->offset + (port * info->port_offset);
+ shift = info->shift + (port * info->port_shift);
+
+ tmp = readl_relaxed(ale->params.ale_regs + offset);
+ tmp = (tmp & ~(mask << shift)) | (value << shift);
+ writel_relaxed(tmp, ale->params.ale_regs + offset);
+
+ return 0;
+}
+
+int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control)
+{
+ const struct ale_control_info *info;
+ int offset, shift;
+ u32 tmp;
+
+ if (control < 0 || control >= ARRAY_SIZE(ale_controls))
+ return -EINVAL;
+
+ info = &ale_controls[control];
+ if (info->port_offset == 0 && info->port_shift == 0)
+ port = 0; /* global, port is a don't care */
+
+ if (port < 0 || port >= ale->params.ale_ports)
+ return -EINVAL;
+
+ offset = info->offset + (port * info->port_offset);
+ shift = info->shift + (port * info->port_shift);
+
+ tmp = readl_relaxed(ale->params.ale_regs + offset) >> shift;
+ return tmp & BITMASK(info->bits);
+}
+
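+/* With ALE_PRESCALE set to a 1ms interval by cpsw_ale_start(), the per-port
+ * MCAST/BCAST_LIMIT field counts packets per millisecond, so the requested
+ * packets-per-second rate is divided by ALE_RATE_LIMIT_MIN_PPS (1000) and
+ * any remainder is rounded down.
+ */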
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE MC port:%d ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d MC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_MCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d MC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps)
+{
+ int val = ratelimit_pps / ALE_RATE_LIMIT_MIN_PPS;
+ u32 remainder = ratelimit_pps % ALE_RATE_LIMIT_MIN_PPS;
+
+ if (ratelimit_pps && !val) {
+ dev_err(ale->params.dev, "ALE port:%d BC ratelimit min value 1000pps\n", port);
+ return -EINVAL;
+ }
+
+ if (remainder)
+ dev_info(ale->params.dev, "ALE port:%d BC ratelimit set to %dpps (requested %d)\n",
+ port, ratelimit_pps - remainder, ratelimit_pps);
+
+ cpsw_ale_control_set(ale, port, ALE_PORT_BCAST_LIMIT, val);
+
+ dev_dbg(ale->params.dev, "ALE port:%d BC ratelimit set %d\n",
+ port, val * ALE_RATE_LIMIT_MIN_PPS);
+ return 0;
+}
+
+static void cpsw_ale_timer(struct timer_list *t)
+{
+ struct cpsw_ale *ale = from_timer(ale, t, timer);
+
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ if (ale->ageout) {
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+ }
+}
+
+static void cpsw_ale_hw_aging_timer_start(struct cpsw_ale *ale)
+{
+ u32 aging_timer;
+
+ aging_timer = ale->params.bus_freq / 1000000;
+ aging_timer *= ale->params.ale_ageout;
+
+ if (aging_timer & ~ALE_AGING_TIMER_MASK) {
+ aging_timer = ALE_AGING_TIMER_MASK;
+ dev_warn(ale->params.dev,
+ "ALE aging timer overflow, set to max\n");
+ }
+
+ writel(aging_timer, ale->params.ale_regs + ALE_AGING_TIMER);
+}
+
+static void cpsw_ale_hw_aging_timer_stop(struct cpsw_ale *ale)
+{
+ writel(0, ale->params.ale_regs + ALE_AGING_TIMER);
+}
+
+static void cpsw_ale_aging_start(struct cpsw_ale *ale)
+{
+ if (!ale->params.ale_ageout)
+ return;
+
+ if (ale->features & CPSW_ALE_F_HW_AUTOAGING) {
+ cpsw_ale_hw_aging_timer_start(ale);
+ return;
+ }
+
+ timer_setup(&ale->timer, cpsw_ale_timer, 0);
+ ale->timer.expires = jiffies + ale->ageout;
+ add_timer(&ale->timer);
+}
+
+static void cpsw_ale_aging_stop(struct cpsw_ale *ale)
+{
+ if (!ale->params.ale_ageout)
+ return;
+
+ if (ale->features & CPSW_ALE_F_HW_AUTOAGING) {
+ cpsw_ale_hw_aging_timer_stop(ale);
+ return;
+ }
+
+ del_timer_sync(&ale->timer);
+}
+
+void cpsw_ale_start(struct cpsw_ale *ale)
+{
+ unsigned long ale_prescale;
+
+ /* configure Broadcast and Multicast Rate Limit
+ * number_of_packets = (Fclk / ALE_PRESCALE) * port.BCAST/MCAST_LIMIT
+ * ALE_PRESCALE width is 19bit and min value 0x10
+ * port.BCAST/MCAST_LIMIT is 8bit
+ *
+ * To support multi-port configurations, ALE_PRESCALE is configured to a 1ms interval,
+ * which allows port.BCAST/MCAST_LIMIT to be configured per port and achieves:
+ * min number_of_packets = 1000 when port.BCAST/MCAST_LIMIT = 1
+ * max number_of_packets = 1000 * 255 = 255000 when port.BCAST/MCAST_LIMIT = 0xFF
+ */
+ ale_prescale = ale->params.bus_freq / ALE_RATE_LIMIT_MIN_PPS;
+ writel((u32)ale_prescale, ale->params.ale_regs + ALE_PRESCALE);
+
+ /* Allow MC/BC rate limiting globally.
+ * The actual rate limit is enabled per port via port.BCAST/MCAST_LIMIT.
+ */
+ cpsw_ale_control_set(ale, 0, ALE_RATE_LIMIT, 1);
+
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 1);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+
+ cpsw_ale_aging_start(ale);
+}
+
+void cpsw_ale_stop(struct cpsw_ale *ale)
+{
+ cpsw_ale_aging_stop(ale);
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
+}
+
+static const struct cpsw_ale_dev_id cpsw_ale_id_match[] = {
+ {
+ /* am3/4/5, dra7, dm814x, 66ak2hk-gbe */
+ .dev_id = "cpsw",
+ .tbl_entries = 1024,
+ .major_ver_mask = 0xff,
+ .vlan_entry_tbl = vlan_entry_cpsw,
+ },
+ {
+ /* 66ak2h_xgbe */
+ .dev_id = "66ak2h-xgbe",
+ .tbl_entries = 2048,
+ .major_ver_mask = 0xff,
+ .vlan_entry_tbl = vlan_entry_cpsw,
+ },
+ {
+ .dev_id = "66ak2el",
+ .features = CPSW_ALE_F_STATUS_REG,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "66ak2g",
+ .features = CPSW_ALE_F_STATUS_REG,
+ .tbl_entries = 64,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "am65x-cpsw2g",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .tbl_entries = 64,
+ .major_ver_mask = 0x7,
+ .nu_switch_ale = true,
+ .vlan_entry_tbl = vlan_entry_nu,
+ },
+ {
+ .dev_id = "j721e-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ },
+ {
+ .dev_id = "am64-cpswxg",
+ .features = CPSW_ALE_F_STATUS_REG | CPSW_ALE_F_HW_AUTOAGING,
+ .major_ver_mask = 0x7,
+ .vlan_entry_tbl = vlan_entry_k3_cpswxg,
+ .tbl_entries = 512,
+ },
+ { },
+};
+
+static const struct
+cpsw_ale_dev_id *cpsw_ale_match_id(const struct cpsw_ale_dev_id *id,
+ const char *dev_id)
+{
+ if (!dev_id)
+ return NULL;
+
+ while (id->dev_id) {
+ if (strcmp(dev_id, id->dev_id) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params)
+{
+ const struct cpsw_ale_dev_id *ale_dev_id;
+ struct cpsw_ale *ale;
+ u32 rev, ale_entries;
+
+ ale_dev_id = cpsw_ale_match_id(cpsw_ale_id_match, params->dev_id);
+ if (!ale_dev_id)
+ return ERR_PTR(-EINVAL);
+
+ params->ale_entries = ale_dev_id->tbl_entries;
+ params->major_ver_mask = ale_dev_id->major_ver_mask;
+ params->nu_switch_ale = ale_dev_id->nu_switch_ale;
+
+ ale = devm_kzalloc(params->dev, sizeof(*ale), GFP_KERNEL);
+ if (!ale)
+ return ERR_PTR(-ENOMEM);
+
+ ale->p0_untag_vid_mask = devm_bitmap_zalloc(params->dev, VLAN_N_VID,
+ GFP_KERNEL);
+ if (!ale->p0_untag_vid_mask)
+ return ERR_PTR(-ENOMEM);
+
+ ale->params = *params;
+ ale->ageout = ale->params.ale_ageout * HZ;
+ ale->features = ale_dev_id->features;
+ ale->vlan_entry_tbl = ale_dev_id->vlan_entry_tbl;
+
+ rev = readl_relaxed(ale->params.ale_regs + ALE_IDVER);
+ ale->version =
+ (ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask) << 8) |
+ ALE_VERSION_MINOR(rev);
+ dev_info(ale->params.dev, "initialized cpsw ale version %d.%d\n",
+ ALE_VERSION_MAJOR(rev, ale->params.major_ver_mask),
+ ALE_VERSION_MINOR(rev));
+
+ if (ale->features & CPSW_ALE_F_STATUS_REG &&
+ !ale->params.ale_entries) {
+ ale_entries =
+ readl_relaxed(ale->params.ale_regs + ALE_STATUS) &
+ ALE_STATUS_SIZE_MASK;
+ /* The ALE found on newer NetCP switches provides an
+ * ALE_STATUS register that reports the size of the ALE
+ * table as a multiple of 1024 entries. For these,
+ * params.ale_entries is left at zero, so read the register
+ * and derive the number of entries from it. Return an
+ * error if ALE_STATUS reports a size of zero.
+ */
+ if (!ale_entries)
+ return ERR_PTR(-EINVAL);
+
+ ale_entries *= ALE_TABLE_SIZE_MULTIPLIER;
+ ale->params.ale_entries = ale_entries;
+ }
+ dev_info(ale->params.dev,
+ "ALE Table size %ld\n", ale->params.ale_entries);
+
+ /* set default bits for existing h/w */
+ ale->port_mask_bits = ale->params.ale_ports;
+ ale->port_num_bits = order_base_2(ale->params.ale_ports);
+ ale->vlan_field_bits = ale->params.ale_ports;
+
+ /* Override the defaults for the ALE on the NetCP NU switch and for
+ * version 1R3
+ */
+ if (ale->params.nu_switch_ale) {
+ /* The unknown-vlan configuration uses separate registers.
+ * Each field is N bits wide, where N is the number of ALE
+ * ports, and the shift value is 0.
+ */
+ ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNKNOWN_VLAN_MEMBER].offset =
+ ALE_UNKNOWNVLAN_MEMBER;
+ ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].shift = 0;
+ ale_controls[ALE_PORT_UNKNOWN_MCAST_FLOOD].offset =
+ ALE_UNKNOWNVLAN_UNREG_MCAST_FLOOD;
+ ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].shift = 0;
+ ale_controls[ALE_PORT_UNKNOWN_REG_MCAST_FLOOD].offset =
+ ALE_UNKNOWNVLAN_REG_MCAST_FLOOD;
+ ale_controls[ALE_PORT_UNTAGGED_EGRESS].bits =
+ ale->params.ale_ports;
+ ale_controls[ALE_PORT_UNTAGGED_EGRESS].shift = 0;
+ ale_controls[ALE_PORT_UNTAGGED_EGRESS].offset =
+ ALE_UNKNOWNVLAN_FORCE_UNTAG_EGRESS;
+ }
+
+ cpsw_ale_control_set(ale, 0, ALE_CLEAR, 1);
+ return ale;
+}
+
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
+{
+ int i;
+
+ for (i = 0; i < ale->params.ale_entries; i++) {
+ cpsw_ale_read(ale, i, data);
+ data += ALE_ENTRY_WORDS;
+ }
+}
+
+u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale)
+{
+ return ale ? ale->params.ale_entries : 0;
+}
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
new file mode 100644
index 000000000..aba4572cf
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments N-Port Ethernet Switch Address Lookup Engine APIs
+ *
+ * Copyright (C) 2012 Texas Instruments
+ *
+ */
+#ifndef __TI_CPSW_ALE_H__
+#define __TI_CPSW_ALE_H__
+
+struct cpsw_ale_params {
+ struct device *dev;
+ void __iomem *ale_regs;
+ unsigned long ale_ageout; /* in secs */
+ unsigned long ale_entries;
+ unsigned long ale_ports;
+ /* The NU Switch ALE needs specific handling: the number of bits in
+ * its ALE entries differs from other ALE versions, and it has
+ * dedicated registers for the unknown-vlan fields. Use nu_switch_ale
+ * to identify this hardware.
+ */
+ bool nu_switch_ale;
+ /* The major version mask in the NU Switch ALE is 3 bits wide instead
+ * of 8 bits, so it is passed in by the caller.
+ */
+ u32 major_ver_mask;
+ const char *dev_id;
+ unsigned long bus_freq;
+};
+
+struct ale_entry_fld;
+
+struct cpsw_ale {
+ struct cpsw_ale_params params;
+ struct timer_list timer;
+ unsigned long ageout;
+ u32 version;
+ u32 features;
+ /* These bits are different on NetCP NU Switch ALE */
+ u32 port_mask_bits;
+ u32 port_num_bits;
+ u32 vlan_field_bits;
+ unsigned long *p0_untag_vid_mask;
+ const struct ale_entry_fld *vlan_entry_tbl;
+};
+
+enum cpsw_ale_control {
+ /* global */
+ ALE_ENABLE,
+ ALE_CLEAR,
+ ALE_AGEOUT,
+ ALE_P0_UNI_FLOOD,
+ ALE_VLAN_NOLEARN,
+ ALE_NO_PORT_VLAN,
+ ALE_OUI_DENY,
+ ALE_BYPASS,
+ ALE_RATE_LIMIT_TX,
+ ALE_VLAN_AWARE,
+ ALE_AUTH_ENABLE,
+ ALE_RATE_LIMIT,
+ /* port controls */
+ ALE_PORT_STATE,
+ ALE_PORT_DROP_UNTAGGED,
+ ALE_PORT_DROP_UNKNOWN_VLAN,
+ ALE_PORT_NOLEARN,
+ ALE_PORT_NO_SA_UPDATE,
+ ALE_PORT_UNKNOWN_VLAN_MEMBER,
+ ALE_PORT_UNKNOWN_MCAST_FLOOD,
+ ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+ ALE_PORT_UNTAGGED_EGRESS,
+ ALE_PORT_MACONLY,
+ ALE_PORT_MACONLY_CAF,
+ ALE_PORT_BCAST_LIMIT,
+ ALE_PORT_MCAST_LIMIT,
+ ALE_DEFAULT_THREAD_ID,
+ ALE_DEFAULT_THREAD_ENABLE,
+ ALE_NUM_CONTROLS,
+};
+
+enum cpsw_ale_port_state {
+ ALE_PORT_STATE_DISABLE = 0x00,
+ ALE_PORT_STATE_BLOCK = 0x01,
+ ALE_PORT_STATE_LEARN = 0x02,
+ ALE_PORT_STATE_FORWARD = 0x03,
+};
+
+/* ALE unicast entry flags - passed into cpsw_ale_add_ucast() */
+#define ALE_SECURE BIT(0)
+#define ALE_BLOCKED BIT(1)
+#define ALE_SUPER BIT(2)
+#define ALE_VLAN BIT(3)
+
+#define ALE_PORT_HOST BIT(0)
+#define ALE_PORT_1 BIT(1)
+#define ALE_PORT_2 BIT(2)
+
+#define ALE_MCAST_FWD 0
+#define ALE_MCAST_BLOCK_LEARN_FWD 1
+#define ALE_MCAST_FWD_LEARN 2
+#define ALE_MCAST_FWD_2 3
+
+#define ALE_ENTRY_BITS 68
+#define ALE_ENTRY_WORDS DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
+struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
+
+void cpsw_ale_start(struct cpsw_ale *ale);
+void cpsw_ale_stop(struct cpsw_ale *ale);
+
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
+int cpsw_ale_add_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
+ int flags, u16 vid);
+int cpsw_ale_del_ucast(struct cpsw_ale *ale, const u8 *addr, int port,
+ int flags, u16 vid);
+int cpsw_ale_add_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
+ int flags, u16 vid, int mcast_state);
+int cpsw_ale_del_mcast(struct cpsw_ale *ale, const u8 *addr, int port_mask,
+ int flags, u16 vid);
+int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
+ int reg_mcast, int unreg_mcast);
+int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
+void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti, int port);
+int cpsw_ale_rx_ratelimit_bc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+int cpsw_ale_rx_ratelimit_mc(struct cpsw_ale *ale, int port, unsigned int ratelimit_pps);
+
+int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
+int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
+ int control, int value);
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
+u32 cpsw_ale_get_num_entries(struct cpsw_ale *ale);
+
+static inline int cpsw_ale_get_vlan_p0_untag(struct cpsw_ale *ale, u16 vid)
+{
+ return test_bit(vid, ale->p0_untag_vid_mask);
+}
+
+int cpsw_ale_vlan_add_modify(struct cpsw_ale *ale, u16 vid, int port_mask,
+ int untag_mask, int reg_mcast, int unreg_mcast);
+int cpsw_ale_vlan_del_modify(struct cpsw_ale *ale, u16 vid, int port_mask);
+void cpsw_ale_set_unreg_mcast(struct cpsw_ale *ale, int unreg_mcast_mask,
+ bool add);
+
+#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c
new file mode 100644
index 000000000..a557a477d
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_ethtool.c
@@ -0,0 +1,752 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver ethtool intf
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+
+#include "cpsw.h"
+#include "cpts.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "davinci_cpdma.h"
+
+struct cpsw_hw_stats {
+ u32 rxgoodframes;
+ u32 rxbroadcastframes;
+ u32 rxmulticastframes;
+ u32 rxpauseframes;
+ u32 rxcrcerrors;
+ u32 rxaligncodeerrors;
+ u32 rxoversizedframes;
+ u32 rxjabberframes;
+ u32 rxundersizedframes;
+ u32 rxfragments;
+ u32 __pad_0[2];
+ u32 rxoctets;
+ u32 txgoodframes;
+ u32 txbroadcastframes;
+ u32 txmulticastframes;
+ u32 txpauseframes;
+ u32 txdeferredframes;
+ u32 txcollisionframes;
+ u32 txsinglecollframes;
+ u32 txmultcollframes;
+ u32 txexcessivecollisions;
+ u32 txlatecollisions;
+ u32 txunderrun;
+ u32 txcarriersenseerrors;
+ u32 txoctets;
+ u32 octetframes64;
+ u32 octetframes65t127;
+ u32 octetframes128t255;
+ u32 octetframes256t511;
+ u32 octetframes512t1023;
+ u32 octetframes1024tup;
+ u32 netoctets;
+ u32 rxsofoverruns;
+ u32 rxmofoverruns;
+ u32 rxdmaoverruns;
+};
+
+struct cpsw_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int type;
+ int sizeof_stat;
+ int stat_offset;
+};
+
+enum {
+ CPSW_STATS,
+ CPDMA_RX_STATS,
+ CPDMA_TX_STATS,
+};
+
+#define CPSW_STAT(m) CPSW_STATS, \
+ sizeof_field(struct cpsw_hw_stats, m), \
+ offsetof(struct cpsw_hw_stats, m)
+#define CPDMA_RX_STAT(m) CPDMA_RX_STATS, \
+ sizeof_field(struct cpdma_chan_stats, m), \
+ offsetof(struct cpdma_chan_stats, m)
+#define CPDMA_TX_STAT(m) CPDMA_TX_STATS, \
+ sizeof_field(struct cpdma_chan_stats, m), \
+ offsetof(struct cpdma_chan_stats, m)
+
+static const struct cpsw_stats cpsw_gstrings_stats[] = {
+ { "Good Rx Frames", CPSW_STAT(rxgoodframes) },
+ { "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
+ { "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
+ { "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
+ { "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
+ { "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
+ { "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
+ { "Rx Jabbers", CPSW_STAT(rxjabberframes) },
+ { "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
+ { "Rx Fragments", CPSW_STAT(rxfragments) },
+ { "Rx Octets", CPSW_STAT(rxoctets) },
+ { "Good Tx Frames", CPSW_STAT(txgoodframes) },
+ { "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
+ { "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
+ { "Pause Tx Frames", CPSW_STAT(txpauseframes) },
+ { "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
+ { "Collisions", CPSW_STAT(txcollisionframes) },
+ { "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
+ { "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
+ { "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
+ { "Late Collisions", CPSW_STAT(txlatecollisions) },
+ { "Tx Underrun", CPSW_STAT(txunderrun) },
+ { "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
+ { "Tx Octets", CPSW_STAT(txoctets) },
+ { "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
+ { "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
+ { "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
+ { "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
+ { "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
+ { "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
+ { "Net Octets", CPSW_STAT(netoctets) },
+ { "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
+ { "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
+ { "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
+};
+
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+ { "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+ { "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+ { "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+ { "misqueued", CPDMA_RX_STAT(misqueued) },
+ { "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+ { "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+ { "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+ { "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+ { "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+ { "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+ { "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+ { "requeue", CPDMA_RX_STAT(requeue) },
+ { "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_COMMON_LEN ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN ARRAY_SIZE(cpsw_gstrings_ch_stats)
+
+u32 cpsw_get_msglevel(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return priv->msg_enable;
+}
+
+void cpsw_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ priv->msg_enable = value;
+}
+
+int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ coal->rx_coalesce_usecs = cpsw->coal_intvl;
+ return 0;
+}
+
+int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl;
+ u32 num_interrupts = 0;
+ u32 prescale = 0;
+ u32 addnl_dvdr = 1;
+ u32 coal_intvl = 0;
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ int_ctrl = readl(&cpsw->wr_regs->int_control);
+ prescale = cpsw->bus_freq_mhz * 4;
+
+ if (!coal->rx_coalesce_usecs) {
+ int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
+ goto update_return;
+ }
+
+ if (coal_intvl < CPSW_CMINTMIN_INTVL)
+ coal_intvl = CPSW_CMINTMIN_INTVL;
+
+ if (coal_intvl > CPSW_CMINTMAX_INTVL) {
+ /* Interrupt pacer works with 4us Pulse, we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
+ coal_intvl = (CPSW_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = CPSW_CMINTMAX_INTVL;
+ }
+ }
+
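+ /* rx_imax/tx_imax limit the number of interrupts per (prescaled)
+ * millisecond, so convert the requested interval in usecs into that
+ * count; addnl_dvdr accounts for the dilated 4us pulse above.
+ */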
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+ writel(num_interrupts, &cpsw->wr_regs->rx_imax);
+ writel(num_interrupts, &cpsw->wr_regs->tx_imax);
+
+ int_ctrl |= CPSW_INTPACEEN;
+ int_ctrl &= (~CPSW_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+
+update_return:
+ writel(int_ctrl, &cpsw->wr_regs->int_control);
+
+ cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
+ cpsw->coal_intvl = coal_intvl;
+
+ return 0;
+}
+
+int cpsw_get_sset_count(struct net_device *ndev, int sset)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return (CPSW_STATS_COMMON_LEN +
+ (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+ CPSW_STATS_CH_LEN);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+ int ch_stats_len;
+ int line;
+ int i;
+
+ ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+ for (i = 0; i < ch_stats_len; i++) {
+ line = i % CPSW_STATS_CH_LEN;
+ snprintf(*p, ETH_GSTRING_LEN,
+ "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
+ (long)(i / CPSW_STATS_CH_LEN),
+ cpsw_gstrings_ch_stats[line].stat_string);
+ *p += ETH_GSTRING_LEN;
+ }
+}
+
+void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ u8 *p = data;
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
+ memcpy(p, cpsw_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ p += ETH_GSTRING_LEN;
+ }
+
+ cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+ cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
+ break;
+ }
+}
+
+void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ u8 *p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpdma_chan_stats ch_stats;
+ int i, l, ch;
+
+ /* Collect the CPSW hardware statistics, then the Davinci CPDMA
+ * stats for each Rx and Tx channel.
+ */
+ for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+ data[l] = readl(cpsw->hw_stats +
+ cpsw_gstrings_stats[l].stat_offset);
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
+
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
+ for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+ p = (u8 *)&ch_stats +
+ cpsw_gstrings_ch_stats[i].stat_offset;
+ data[l] = *(u32 *)p;
+ }
+ }
+}
+
+void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ pause->autoneg = AUTONEG_DISABLE;
+ pause->rx_pause = priv->rx_pause ? true : false;
+ pause->tx_pause = priv->tx_pause ? true : false;
+}
+
+void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ wol->supported = 0;
+ wol->wolopts = 0;
+
+ if (cpsw->slaves[slave_no].phy)
+ phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
+}
+
+int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
+ else
+ return -EOPNOTSUPP;
+}
+
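+/* The ethtool register dump for this driver is a raw copy of the ALE table:
+ * one record of ALE_ENTRY_WORDS 32-bit words per table entry.
+ */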
+int cpsw_get_regs_len(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return cpsw_ale_get_num_entries(cpsw->ale) *
+ ALE_ENTRY_WORDS * sizeof(u32);
+}
+
+void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p)
+{
+ u32 *reg = p;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ /* update CPSW IP version */
+ regs->version = cpsw->version;
+
+ cpsw_ale_dump(cpsw->ale, reg);
+}
+
+int cpsw_ethtool_op_begin(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
+
+ return ret;
+}
+
+void cpsw_ethtool_op_complete(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ ret = pm_runtime_put(priv->cpsw->dev);
+ if (ret < 0)
+ cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
+}
+
+void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
+ ch->max_combined = 0;
+ ch->max_other = 0;
+ ch->other_count = 0;
+ ch->rx_count = cpsw->rx_ch_num;
+ ch->tx_count = cpsw->tx_ch_num;
+ ch->combined_count = 0;
+}
+
+int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+
+ phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
+ return 0;
+}
+
+int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (!cpsw->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy, ecmd);
+}
+
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
+ else
+ return -EOPNOTSUPP;
+}
+
+int cpsw_nway_reset(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+
+ if (cpsw->slaves[slave_no].phy)
+ return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
+ else
+ return -EOPNOTSUPP;
+}
+
+static void cpsw_suspend_data_pass(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ int i;
+
+ /* Disable NAPI scheduling */
+ cpsw_intr_disable(cpsw);
+
+ /* Stop all transmit queues for every network device.
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ netif_tx_stop_all_queues(ndev);
+
+ /* Barrier, so that the queue stop is visible to other CPUs */
+ smp_mb__after_atomic();
+ }
+
+ /* Handle rest of tx packets and stop cpdma channels */
+ cpdma_ctlr_stop(cpsw->dma);
+}
+
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int i, ret;
+
+ /* After this point, receive is started */
+ if (cpsw->usage_count) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ return ret;
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (ndev && netif_running(ndev))
+ netif_tx_start_all_queues(ndev);
+ }
+
+ return 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+ struct ethtool_channels *ch)
+{
+ if (cpsw->quirk_irq) {
+ dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
+ return -EOPNOTSUPP;
+ }
+
+ if (ch->combined_count)
+ return -EINVAL;
+
+ /* verify we have at least one channel in each direction */
+ if (!ch->rx_count || !ch->tx_count)
+ return -EINVAL;
+
+ if (ch->rx_count > cpsw->data.channels ||
+ ch->tx_count > cpsw->data.channels)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx,
+ cpdma_handler_fn rx_handler)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void (*handler)(void *, int, int);
+ struct netdev_queue *queue;
+ struct cpsw_vector *vec;
+ int ret, *ch, vch;
+
+ if (rx) {
+ ch = &cpsw->rx_ch_num;
+ vec = cpsw->rxv;
+ handler = rx_handler;
+ } else {
+ ch = &cpsw->tx_ch_num;
+ vec = cpsw->txv;
+ handler = cpsw_tx_handler;
+ }
+
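+ /* Rx vectors map directly onto CPDMA channel numbers; tx vectors
+ * are mapped onto hardware channels from the top (7) downwards.
+ */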
+ while (*ch < ch_num) {
+ vch = rx ? *ch : 7 - *ch;
+ vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
+ queue = netdev_get_tx_queue(priv->ndev, *ch);
+ queue->tx_maxrate = 0;
+
+ if (IS_ERR(vec[*ch].ch))
+ return PTR_ERR(vec[*ch].ch);
+
+ if (!vec[*ch].ch)
+ return -EINVAL;
+
+ cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ (*ch)++;
+ }
+
+ while (*ch > ch_num) {
+ (*ch)--;
+
+ ret = cpdma_chan_destroy(vec[*ch].ch);
+ if (ret)
+ return ret;
+
+ cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+ (rx ? "rx" : "tx"));
+ }
+
+ return 0;
+}
+
+static void cpsw_fail(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (ndev)
+ dev_close(ndev);
+ }
+}
+
+int cpsw_set_channels_common(struct net_device *ndev,
+ struct ethtool_channels *chs,
+ cpdma_handler_fn rx_handler)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *sl_ndev;
+ int i, new_pools, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ cpsw_suspend_data_pass(ndev);
+
+ new_pools = (chs->rx_count != cpsw->rx_ch_num) && cpsw->usage_count;
+
+ ret = cpsw_update_channels_res(priv, chs->rx_count, 1, rx_handler);
+ if (ret)
+ goto err;
+
+ ret = cpsw_update_channels_res(priv, chs->tx_count, 0, rx_handler);
+ if (ret)
+ goto err;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ sl_ndev = cpsw->slaves[i].ndev;
+ if (!(sl_ndev && netif_running(sl_ndev)))
+ continue;
+
+ /* Inform stack about new count of queues */
+ ret = netif_set_real_num_tx_queues(sl_ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto err;
+ }
+
+ ret = netif_set_real_num_rx_queues(sl_ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto err;
+ }
+ }
+
+ cpsw_split_res(cpsw);
+
+ if (new_pools) {
+ cpsw_destroy_xdp_rxqs(cpsw);
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret)
+ goto err;
+ }
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+err:
+ dev_err(priv->dev, "cannot update channels number, closing device\n");
+ cpsw_fail(cpsw);
+ return ret;
+}
+
+void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+	/* tx ring resizing is not supported, report current values only */
+ ering->tx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
+ ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
+ ering->rx_max_pending = cpsw->descs_pool_size - CPSW_MAX_QUEUES;
+ ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
+}
+
+int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ int descs_num, ret;
+
+ /* ignore ering->tx_pending - only rx_pending adjustment is supported */
+
+ if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
+ ering->rx_pending < CPSW_MAX_QUEUES ||
+ ering->rx_pending > (cpsw->descs_pool_size - CPSW_MAX_QUEUES))
+ return -EINVAL;
+
+ descs_num = cpdma_get_num_rx_descs(cpsw->dma);
+ if (ering->rx_pending == descs_num)
+ return 0;
+
+ cpsw_suspend_data_pass(ndev);
+
+ ret = cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
+ if (ret) {
+ if (cpsw_resume_data_pass(ndev))
+ goto err;
+
+ return ret;
+ }
+
+ if (cpsw->usage_count) {
+ cpsw_destroy_xdp_rxqs(cpsw);
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret)
+ goto err;
+ }
+
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
+err:
+ cpdma_set_num_rx_descs(cpsw->dma, descs_num);
+ dev_err(cpsw->dev, "cannot set ring params, closing device\n");
+ cpsw_fail(cpsw);
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = cpsw->cpts->phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
+#else
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+ return 0;
+}
+#endif
diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c
new file mode 100644
index 000000000..83596ec0c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_new.c
@@ -0,0 +1,2120 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/irqreturn.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/etherdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/phy.h>
+#include <linux/phy/phy.h>
+#include <linux/delay.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_device.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/sys_soc.h>
+
+#include <net/switchdev.h>
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+#include <net/devlink.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "cpsw_switchdev.h"
+#include "cpts.h"
+#include "davinci_cpdma.h"
+
+#include <net/pkt_sched.h>
+
+static int debug_level;
+static int ale_ageout = CPSW_ALE_AGEOUT_DEFAULT;
+static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
+static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
+
+struct cpsw_devlink {
+ struct cpsw_common *cpsw;
+};
+
+enum cpsw_devlink_param_id {
+ CPSW_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ CPSW_DL_PARAM_SWITCH_MODE,
+ CPSW_DL_PARAM_ALE_BYPASS,
+};
+
+/* struct cpsw_common is not needed, kept here for compatibility
+ * reasons with the old driver
+ */
+static int cpsw_slave_index_priv(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv)
+{
+ if (priv->emac_port == HOST_PORT_NUM)
+ return -1;
+
+ return priv->emac_port - 1;
+}
+
+static bool cpsw_is_switch_en(struct cpsw_common *cpsw)
+{
+ return !cpsw->data.dual_emac;
+}
+
+static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ bool enable_uni = false;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw))
+ return;
+
+	/* Enabling promiscuous mode for one interface affects both
+	 * interfaces, as they share the same hardware resource.
+	 */
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev &&
+ (cpsw->slaves[i].ndev->flags & IFF_PROMISC))
+ enable_uni = true;
+
+ if (!enable && enable_uni) {
+ enable = enable_uni;
+ dev_dbg(cpsw->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
+ }
+
+ if (enable) {
+ /* Enable unknown unicast, reg/unreg mcast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 1);
+
+ dev_dbg(cpsw->dev, "promiscuity enabled\n");
+ } else {
+ /* Disable unknown unicast */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "promiscuity disabled\n");
+ }
+}
+
+/**
+ * cpsw_set_mc - add or delete a multicast address in the ALE table
+ * @ndev: device to sync
+ * @addr: address to be added or deleted
+ * @vid: vlan id, if vid < 0 set/unset address for real device
+ * @add: add address if the flag is set or remove otherwise
+ */
+static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
+ int vid, int add)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int mask, flags, ret, slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (vid < 0)
+ vid = cpsw->slaves[slave_no].port_vlan;
+
+ mask = ALE_PORT_HOST;
+ flags = vid ? ALE_VLAN : 0;
+
+ if (add)
+ ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
+ else
+ ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
+
+ return ret;
+}
+
+static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0, ret = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (found)
+ sync_ctx->consumed++;
+
+ if (sync_ctx->flush) {
+ if (!found)
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+ }
+
+ if (found)
+ ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);
+
+ return ret;
+}
+
+static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+ int ret;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 0;
+
+ ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num && !ret)
+ ret = cpsw_set_mc(ndev, addr, -1, 1);
+
+ return ret;
+}
+
+static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.consumed = 0;
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.flush = 1;
+
+ vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed == num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
+{
+ struct addr_sync_ctx *sync_ctx = ctx;
+ struct netdev_hw_addr *ha;
+ int found = 0;
+
+ if (!vdev || !(vdev->flags & IFF_UP))
+ return 0;
+
+ /* vlan address is relevant if its sync_cnt != 0 */
+ netdev_for_each_mc_addr(ha, vdev) {
+ if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
+ found = ha->sync_cnt;
+ break;
+ }
+ }
+
+ if (!found)
+ return 0;
+
+ sync_ctx->consumed++;
+ cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
+ return 0;
+}
+
+static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
+{
+ struct addr_sync_ctx sync_ctx;
+
+ sync_ctx.addr = addr;
+ sync_ctx.ndev = ndev;
+ sync_ctx.consumed = 0;
+
+ vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
+ if (sync_ctx.consumed < num)
+ cpsw_set_mc(ndev, addr, -1, 0);
+
+ return 0;
+}
+
+static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (ndev->flags & IFF_PROMISC) {
+ /* Enable promiscuous mode */
+ cpsw_set_promiscious(ndev, true);
+ cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI, priv->emac_port);
+ return;
+ }
+
+ /* Disable promiscuous mode */
+ cpsw_set_promiscious(ndev, false);
+
+ /* Restore allmulti on vlans if necessary */
+ cpsw_ale_set_allmulti(cpsw->ale,
+ ndev->flags & IFF_ALLMULTI, priv->emac_port);
+
+ /* add/remove mcast address either for real netdev or for vlan */
+ __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
+ cpsw_del_mc_addr);
+}
+
+static unsigned int cpsw_rxbuf_total_len(unsigned int len)
+{
+ len += CPSW_HEADROOM_NA;
+ len += SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ return SKB_DATA_ALIGN(len);
+}
+
+static void cpsw_rx_handler(void *token, int len, int status)
+{
+ struct page *new_page, *page = token;
+ void *pa = page_address(page);
+ int headroom = CPSW_HEADROOM_NA;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpsw_common *cpsw;
+ struct net_device *ndev;
+ int port, ch, pkt_size;
+ struct cpsw_priv *priv;
+ struct page_pool *pool;
+ struct sk_buff *skb;
+ struct xdp_buff xdp;
+ int ret = 0;
+ dma_addr_t dma;
+
+ xmeta = pa + CPSW_XMETA_OFFSET;
+ cpsw = ndev_to_cpsw(xmeta->ndev);
+ ndev = xmeta->ndev;
+ pkt_size = cpsw->rx_packet_max;
+ ch = xmeta->ch;
+
+ if (status >= 0) {
+ port = CPDMA_RX_SOURCE_PORT(status);
+ if (port)
+ ndev = cpsw->slaves[--port].ndev;
+ }
+
+ priv = netdev_priv(ndev);
+ pool = cpsw->page_pool[ch];
+
+ if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->usage_count && status >= 0) {
+			/* The packet was received on an interface that is
+			 * already down while the other interface is still up
+			 * and running. Instead of freeing the page, which
+			 * would reduce the number of rx descriptors in the
+			 * DMA engine, requeue it back to cpdma.
+			 */
+ new_page = page;
+ goto requeue;
+ }
+
+ /* the interface is going down, pages are purged */
+ page_pool_recycle_direct(pool, page);
+ return;
+ }
+
+ new_page = page_pool_dev_alloc_pages(pool);
+ if (unlikely(!new_page)) {
+ new_page = page;
+ ndev->stats.rx_dropped++;
+ goto requeue;
+ }
+
+ if (priv->xdp_prog) {
+ int size = len;
+
+ xdp_init_buff(&xdp, PAGE_SIZE, &priv->xdp_rxq[ch]);
+ if (status & CPDMA_RX_VLAN_ENCAP) {
+ headroom += CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ size -= CPSW_RX_VLAN_ENCAP_HDR_SIZE;
+ }
+
+ xdp_prepare_buff(&xdp, pa, headroom, size, false);
+
+ ret = cpsw_run_xdp(priv, ch, &xdp, page, priv->emac_port, &len);
+ if (ret != CPSW_XDP_PASS)
+ goto requeue;
+
+ headroom = xdp.data - xdp.data_hard_start;
+
+ /* XDP prog can modify vlan tag, so can't use encap header */
+ status &= ~CPDMA_RX_VLAN_ENCAP;
+ }
+
+ /* pass skb to netstack if no XDP prog or returned XDP_PASS */
+ skb = build_skb(pa, cpsw_rxbuf_total_len(pkt_size));
+ if (!skb) {
+ ndev->stats.rx_dropped++;
+ page_pool_recycle_direct(pool, page);
+ goto requeue;
+ }
+
+ skb->offload_fwd_mark = priv->offload_fwd_mark;
+ skb_reserve(skb, headroom);
+ skb_put(skb, len);
+ skb->dev = ndev;
+ if (status & CPDMA_RX_VLAN_ENCAP)
+ cpsw_rx_vlan_encap(skb);
+ if (priv->rx_ts_enabled)
+ cpts_rx_timestamp(cpsw->cpts, skb);
+ skb->protocol = eth_type_trans(skb, ndev);
+
+ /* mark skb for recycling */
+ skb_mark_for_recycle(skb);
+ netif_receive_skb(skb);
+
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+requeue:
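+	/* hand the page (new, or reused on error) back to the RX DMA channel */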
+ xmeta = page_address(new_page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(new_page) + CPSW_HEADROOM_NA;
+ ret = cpdma_chan_submit_mapped(cpsw->rxv[ch].ch, new_page, dma,
+ pkt_size, 0);
+ if (ret < 0) {
+ WARN_ON(ret == -ENOMEM);
+ page_pool_recycle_direct(pool, new_page);
+ }
+}
+
+static int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
+ unsigned short vid)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int mcast_mask;
+ u32 port_mask;
+ int ret;
+
+ port_mask = (1 << priv->emac_port) | ALE_PORT_HOST;
+
+ mcast_mask = ALE_PORT_HOST;
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = mcast_mask;
+
+ ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
+ unreg_mcast_mask);
+ if (ret != 0)
+ return ret;
+
+ ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret != 0)
+ goto clean_vid;
+
+ ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ mcast_mask, ALE_VLAN, vid, 0);
+ if (ret != 0)
+ goto clean_vlan_ucast;
+ return 0;
+
+clean_vlan_ucast:
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+clean_vid:
+ cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ return ret;
+}
+
+static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, ".ndo_vlan_rx_add_vid called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ /* In dual EMAC, reserved VLAN id should not be used for
+ * creating VLAN interfaces as this can break the dual
+ * EMAC port separation
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ dev_dbg(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
+ ret = cpsw_add_vlan_ale_entry(priv, vid);
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
+{
+ struct cpsw_priv *priv = arg;
+
+ if (!vdev || !vid)
+ return 0;
+
+ cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
+ return 0;
+}
+
+/* restore resources after port reset */
+static void cpsw_restore(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ /* restore vlan configurations */
+ vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
+
+ /* restore MQPRIO offload */
+ cpsw_mqprio_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* restore CBS offload */
+ cpsw_cbs_resume(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ cpsw_qos_clsflower_resume(priv);
+}
+
+static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw)
+{
+ static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0};
+
+ cpsw_ale_add_mcast(cpsw->ale, stpa,
+ ALE_PORT_HOST, ALE_SUPER, 0,
+ ALE_MCAST_BLOCK_LEARN_FWD);
+}
+
+static void cpsw_init_host_port_switch(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_NORMAL_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
+ ALE_ALL_PORTS, ALE_ALL_PORTS,
+ ALE_PORT_1 | ALE_PORT_2);
+
+ cpsw_init_stp_ale_entry(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 1);
+ dev_dbg(cpsw->dev, "Set P0_UNI_FLOOD\n");
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 0);
+}
+
+static void cpsw_init_host_port_dual_mac(struct cpsw_common *cpsw)
+{
+ int vlan = cpsw->data.default_vlan;
+
+ writel(CPSW_FIFO_DUAL_MAC_MODE, &cpsw->host_port_regs->tx_in_ctl);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_P0_UNI_FLOOD, 0);
+ dev_dbg(cpsw->dev, "unset P0_UNI_FLOOD\n");
+
+ writel(vlan, &cpsw->host_port_regs->port_vlan);
+
+ cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
+	/* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_PORT_NOLEARN, 1);
+}
+
+static void cpsw_init_host_port(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 control_reg;
+
+ /* soft reset the controller and initialize ale */
+ soft_reset("cpsw", &cpsw->regs->soft_reset);
+ cpsw_ale_start(cpsw->ale);
+
+	/* switch to vlan aware mode */
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
+ CPSW_ALE_VLAN_AWARE);
+ control_reg = readl(&cpsw->regs->control);
+ control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
+ writel(control_reg, &cpsw->regs->control);
+
+ /* setup host port priority mapping */
+ writel_relaxed(CPDMA_TX_PRIORITY_MAP,
+ &cpsw->host_port_regs->cpdma_tx_pri_map);
+ writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
+
+ /* disable priority elevation */
+ writel_relaxed(0, &cpsw->regs->ptype);
+
+	/* enable statistics collection on all ports */
+ writel_relaxed(0x7, &cpsw->regs->stat_port_en);
+
+ /* Enable internal fifo flow control */
+ writel(0x7, &cpsw->regs->flow_control);
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_init_host_port_switch(cpsw);
+ else
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+}
+
+static void cpsw_port_add_dual_emac_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+
+ cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
+ port_mask, port_mask, 0);
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ ALE_PORT_HOST, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN |
+ ALE_SECURE, slave->port_vlan);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 1);
+	/* learning makes no sense in dual_mac mode */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 1);
+}
+
+static void cpsw_port_add_switch_def_ale_entries(struct cpsw_priv *priv,
+ struct cpsw_slave *slave)
+{
+ u32 port_mask = 1 << priv->emac_port | ALE_PORT_HOST;
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 reg;
+
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_DROP_UNKNOWN_VLAN, 0);
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NOLEARN, 0);
+	/* Disabling SA_UPDATE is required to make STP work; without this
+	 * setting host MAC addresses will jump between ports.
+	 * As per the TRM, a MAC address can be defined as unicast supervisory
+	 * (super) by setting both ALE_BLOCKED and ALE_SECURE, which should
+	 * prevent SA_UPDATE, but the HW seems to work incorrectly: setting
+	 * ALE_SECURE causes STP packets to be dropped due to the ingress
+	 * filter rule
+	 *	if (source address found) and (secure) and
+	 *	   (receive port number != port_number)
+	 *	then discard the packet
+	 */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_NO_SA_UPDATE, 1);
+
+ cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, slave->port_vlan,
+ ALE_MCAST_FWD_2);
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, slave->port_vlan);
+
+ reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
+ CPSW2_PORT_VLAN;
+ slave_write(slave, slave->port_vlan, reg);
+}
+
+static void cpsw_adjust_link(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ struct phy_device *phy;
+ u32 mac_control = 0;
+
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ phy = slave->phy;
+
+ if (!phy)
+ return;
+
+ if (phy->link) {
+ mac_control = CPSW_SL_CTL_GMII_EN;
+
+ if (phy->speed == 1000)
+ mac_control |= CPSW_SL_CTL_GIG;
+ if (phy->duplex)
+ mac_control |= CPSW_SL_CTL_FULLDUPLEX;
+
+		/* set speed_in input in case RMII mode is used at 100Mbps */
+ if (phy->speed == 100)
+ mac_control |= CPSW_SL_CTL_IFCTL_A;
+ /* in band mode only works in 10Mbps RGMII mode */
+ else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
+ mac_control |= CPSW_SL_CTL_EXT_EN; /* In Band mode */
+
+ if (priv->rx_pause)
+ mac_control |= CPSW_SL_CTL_RX_FLOW_EN;
+
+ if (priv->tx_pause)
+ mac_control |= CPSW_SL_CTL_TX_FLOW_EN;
+
+ if (mac_control != slave->mac_control)
+ cpsw_sl_ctl_set(slave->mac_sl, mac_control);
+
+ /* enable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ netif_tx_wake_all_queues(ndev);
+
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed &&
+ !cpsw_shp_is_off(priv))
+ dev_warn(priv->dev, "Speed was changed, CBS shaper speeds are changed!");
+ } else {
+ netif_tx_stop_all_queues(ndev);
+
+ mac_control = 0;
+ /* disable forwarding */
+ cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+
+ cpsw_sl_wait_for_idle(slave->mac_sl, 100);
+
+ cpsw_sl_ctl_reset(slave->mac_sl);
+ }
+
+ if (mac_control != slave->mac_control)
+ phy_print_status(phy);
+
+ slave->mac_control = mac_control;
+
+ if (phy->link && cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+}
+
+static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct phy_device *phy;
+
+ cpsw_sl_reset(slave->mac_sl, 100);
+ cpsw_sl_ctl_reset(slave->mac_sl);
+
+ /* setup priority mapping */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_PRI_MAP,
+ RX_PRIORITY_MAPPING);
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
+		/* Increase RX FIFO size to 5 to support full-duplex
+		 * flow control mode
+		 */
+ slave_write(slave,
+ (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
+ CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
+ break;
+ }
+
+ /* setup max packet size, and mac address */
+ cpsw_sl_reg_write(slave->mac_sl, CPSW_SL_RX_MAXLEN,
+ cpsw->rx_packet_max);
+ cpsw_set_slave_mac(slave, priv);
+
+ slave->mac_control = 0; /* no link yet */
+
+ if (cpsw_is_switch_en(cpsw))
+ cpsw_port_add_switch_def_ale_entries(priv, slave);
+ else
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+
+ if (!slave->data->phy_node)
+ dev_err(priv->dev, "no phy found on slave %d\n",
+ slave->slave_num);
+ phy = of_phy_connect(priv->ndev, slave->data->phy_node,
+ &cpsw_adjust_link, 0, slave->data->phy_if);
+ if (!phy) {
+ dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
+ slave->data->phy_node,
+ slave->slave_num);
+ return;
+ }
+ slave->phy = phy;
+
+ phy_attached_info(slave->phy);
+
+ phy_start(slave->phy);
+
+ /* Configure GMII_SEL register */
+ phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
+ slave->data->phy_if);
+}
+
+static int cpsw_ndo_stop(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+
+ cpsw_info(priv, ifdown, "shutting down ndev\n");
+ slave = &cpsw->slaves[priv->emac_port - 1];
+ if (slave->phy)
+ phy_stop(slave->phy);
+
+ netif_tx_stop_all_queues(priv->ndev);
+
+ if (slave->phy) {
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+ }
+
+ __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
+
+ if (cpsw->usage_count <= 1) {
+ napi_disable(&cpsw->napi_rx);
+ napi_disable(&cpsw->napi_tx);
+ cpts_unregister(cpsw->cpts);
+ cpsw_intr_disable(cpsw);
+ cpdma_ctlr_stop(cpsw->dma);
+ cpsw_ale_stop(cpsw->ale);
+ cpsw_destroy_xdp_rxqs(cpsw);
+ }
+
+ if (cpsw_need_resplit(cpsw))
+ cpsw_split_res(cpsw);
+
+ cpsw->usage_count--;
+ pm_runtime_put_sync(cpsw->dev);
+ return 0;
+}
+
+static int cpsw_ndo_open(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+
+ dev_info(priv->dev, "starting ndev. mode: %s\n",
+ cpsw_is_switch_en(cpsw) ? "switch" : "dual_mac");
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ /* Notify the stack of the actual queue counts. */
+ ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of tx queues\n");
+ goto pm_cleanup;
+ }
+
+ ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+ if (ret) {
+ dev_err(priv->dev, "cannot set real number of rx queues\n");
+ goto pm_cleanup;
+ }
+
+ /* Initialize host and slave ports */
+ if (!cpsw->usage_count)
+ cpsw_init_host_port(priv);
+ cpsw_slave_open(&cpsw->slaves[priv->emac_port - 1], priv);
+
+ /* initialize shared resources for every ndev */
+ if (!cpsw->usage_count) {
+		/* create rxqs for both interfaces in dual mac mode, as they
+		 * use the same pool and must be destroyed together once there
+		 * are no users.
+		 */
+ ret = cpsw_create_xdp_rxqs(cpsw);
+ if (ret < 0)
+ goto err_cleanup;
+
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret < 0)
+ goto err_cleanup;
+
+ if (cpsw->cpts) {
+ if (cpts_register(cpsw->cpts))
+ dev_err(priv->dev, "error registering cpts device\n");
+ else
+ writel(0x10, &cpsw->wr_regs->misc_en);
+ }
+
+ napi_enable(&cpsw->napi_rx);
+ napi_enable(&cpsw->napi_tx);
+
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ cpsw_restore(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (cpsw->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = cpsw->coal_intvl;
+ cpsw_set_coalesce(ndev, &coal, NULL, NULL);
+ }
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ cpsw->usage_count++;
+
+ return 0;
+
+err_cleanup:
+ cpsw_ndo_stop(ndev);
+
+pm_cleanup:
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
+static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
+ struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpts *cpts = cpsw->cpts;
+ struct netdev_queue *txq;
+ struct cpdma_chan *txch;
+ int ret, q_idx;
+
+ if (skb_put_padto(skb, READ_ONCE(priv->tx_packet_min))) {
+ cpsw_err(priv, tx_err, "packet pad failed\n");
+ ndev->stats.tx_dropped++;
+ return NET_XMIT_DROP;
+ }
+
+ if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
+ priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+
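+	/* map the skb's queue index onto an available tx channel */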
+ q_idx = skb_get_queue_mapping(skb);
+ if (q_idx >= cpsw->tx_ch_num)
+ q_idx = q_idx % cpsw->tx_ch_num;
+
+ txch = cpsw->txv[q_idx].ch;
+ txq = netdev_get_tx_queue(ndev, q_idx);
+ skb_tx_timestamp(skb);
+ ret = cpdma_chan_submit(txch, skb, skb->data, skb->len,
+ priv->emac_port);
+ if (unlikely(ret != 0)) {
+ cpsw_err(priv, tx_err, "desc submit failed\n");
+ goto fail;
+ }
+
+	/* If there are no free tx descriptors left, tell the kernel
+	 * to stop sending us tx frames.
+	 */
+ if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+ netif_tx_stop_queue(txq);
+
+		/* Barrier, so that stop_queue is visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+ }
+
+ return NETDEV_TX_OK;
+fail:
+ ndev->stats.tx_dropped++;
+ netif_tx_stop_queue(txq);
+
+	/* Barrier, so that stop_queue is visible to other cpus */
+ smp_mb__after_atomic();
+
+ if (cpdma_check_free_tx_desc(txch))
+ netif_tx_wake_queue(txq);
+
+ return NETDEV_TX_BUSY;
+}
+
+static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
+{
+ struct sockaddr *addr = (struct sockaddr *)p;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret, slave_no;
+ int flags = 0;
+ u16 vid = 0;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ vid = cpsw->slaves[slave_no].port_vlan;
+ flags = ALE_VLAN | ALE_SECURE;
+
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
+ flags, vid);
+ cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
+ flags, vid);
+
+ ether_addr_copy(priv->mac_addr, addr->sa_data);
+ eth_hw_addr_set(ndev, priv->mac_addr);
+ cpsw_set_slave_mac(&cpsw->slaves[slave_no], priv);
+
+ pm_runtime_put(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
+ __be16 proto, u16 vid)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ret;
+ int i;
+
+ if (cpsw_is_switch_en(cpsw)) {
+ dev_dbg(cpsw->dev, "ndo del vlan is called in switch mode\n");
+ return 0;
+ }
+
+ if (vid == cpsw->data.default_vlan)
+ return 0;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+	/* clear the return code from the runtime PM call above before
+	 * reusing it below.
+	 */
+ ret = 0;
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (cpsw->slaves[i].ndev &&
+ vid == cpsw->slaves[i].port_vlan) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+
+ dev_dbg(priv->dev, "removing vlanid %d from vlan filter\n", vid);
+ ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_vlan() failed: ret %d\n", ret);
+ ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_ucast() failed: ret %d\n",
+ ret);
+ ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ 0, ALE_VLAN, vid);
+ if (ret)
+ dev_err(priv->dev, "cpsw_ale_del_mcast failed. ret %d\n",
+ ret);
+ cpsw_ale_flush_multicast(cpsw->ale, ALE_PORT_HOST, vid);
+ ret = 0;
+err:
+ pm_runtime_put(cpsw->dev);
+ return ret;
+}
+
+static int cpsw_ndo_get_phys_port_name(struct net_device *ndev, char *name,
+ size_t len)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ err = snprintf(name, len, "p%d", priv->emac_port);
+
+ if (err >= len)
+ return -EINVAL;
+
+ return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cpsw_ndo_poll_controller(struct net_device *ndev)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ cpsw_intr_disable(cpsw);
+ cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
+ cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
+ cpsw_intr_enable(cpsw);
+}
+#endif
+
+static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
+ struct xdp_frame **frames, u32 flags)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct xdp_frame *xdpf;
+ int i, nxmit = 0;
+
+ if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
+ return -EINVAL;
+
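+	/* queue frames until one is too short or submission fails; return
+	 * the number actually queued
+	 */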
+ for (i = 0; i < n; i++) {
+ xdpf = frames[i];
+ if (xdpf->len < READ_ONCE(priv->tx_packet_min))
+ break;
+
+ if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
+ break;
+ nxmit++;
+ }
+
+ return nxmit;
+}
+
+static int cpsw_get_port_parent_id(struct net_device *ndev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ ppid->id_len = sizeof(cpsw->base_mac);
+ memcpy(&ppid->id, &cpsw->base_mac, ppid->id_len);
+
+ return 0;
+}
+
+static const struct net_device_ops cpsw_netdev_ops = {
+ .ndo_open = cpsw_ndo_open,
+ .ndo_stop = cpsw_ndo_stop,
+ .ndo_start_xmit = cpsw_ndo_start_xmit,
+ .ndo_set_mac_address = cpsw_ndo_set_mac_address,
+ .ndo_eth_ioctl = cpsw_ndo_ioctl,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_tx_timeout = cpsw_ndo_tx_timeout,
+ .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
+ .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = cpsw_ndo_poll_controller,
+#endif
+ .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
+ .ndo_setup_tc = cpsw_ndo_setup_tc,
+ .ndo_get_phys_port_name = cpsw_ndo_get_phys_port_name,
+ .ndo_bpf = cpsw_ndo_bpf,
+ .ndo_xdp_xmit = cpsw_ndo_xdp_xmit,
+ .ndo_get_port_parent_id = cpsw_get_port_parent_id,
+};
+
+static void cpsw_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct platform_device *pdev;
+
+ pdev = to_platform_device(cpsw->dev);
+ strscpy(info->driver, "cpsw-switch", sizeof(info->driver));
+ strscpy(info->version, "2.0", sizeof(info->version));
+ strscpy(info->bus_info, pdev->name, sizeof(info->bus_info));
+}
+
+static int cpsw_set_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int slave_no;
+
+ slave_no = cpsw_slave_index(cpsw, priv);
+ if (!cpsw->slaves[slave_no].phy)
+ return -EINVAL;
+
+ if (!phy_validate_pause(cpsw->slaves[slave_no].phy, pause))
+ return -EINVAL;
+
+ priv->rx_pause = pause->rx_pause ? true : false;
+ priv->tx_pause = pause->tx_pause ? true : false;
+
+ phy_set_asym_pause(cpsw->slaves[slave_no].phy,
+ priv->rx_pause, priv->tx_pause);
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ return cpsw_set_channels_common(ndev, chs, cpsw_rx_handler);
+}
+
+static const struct ethtool_ops cpsw_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
+ .get_drvinfo = cpsw_get_drvinfo,
+ .get_msglevel = cpsw_get_msglevel,
+ .set_msglevel = cpsw_set_msglevel,
+ .get_link = ethtool_op_get_link,
+ .get_ts_info = cpsw_get_ts_info,
+ .get_coalesce = cpsw_get_coalesce,
+ .set_coalesce = cpsw_set_coalesce,
+ .get_sset_count = cpsw_get_sset_count,
+ .get_strings = cpsw_get_strings,
+ .get_ethtool_stats = cpsw_get_ethtool_stats,
+ .get_pauseparam = cpsw_get_pauseparam,
+ .set_pauseparam = cpsw_set_pauseparam,
+ .get_wol = cpsw_get_wol,
+ .set_wol = cpsw_set_wol,
+ .get_regs_len = cpsw_get_regs_len,
+ .get_regs = cpsw_get_regs,
+ .begin = cpsw_ethtool_op_begin,
+ .complete = cpsw_ethtool_op_complete,
+ .get_channels = cpsw_get_channels,
+ .set_channels = cpsw_set_channels,
+ .get_link_ksettings = cpsw_get_link_ksettings,
+ .set_link_ksettings = cpsw_set_link_ksettings,
+ .get_eee = cpsw_get_eee,
+ .set_eee = cpsw_set_eee,
+ .nway_reset = cpsw_nway_reset,
+ .get_ringparam = cpsw_get_ringparam,
+ .set_ringparam = cpsw_set_ringparam,
+};
+
+static int cpsw_probe_dt(struct cpsw_common *cpsw)
+{
+ struct device_node *node = cpsw->dev->of_node, *tmp_node, *port_np;
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct device *dev = cpsw->dev;
+ int ret;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ tmp_node = of_get_child_by_name(node, "ethernet-ports");
+ if (!tmp_node)
+ return -ENOENT;
+ data->slaves = of_get_child_count(tmp_node);
+ if (data->slaves != CPSW_SLAVE_PORTS_NUM) {
+ of_node_put(tmp_node);
+ return -ENOENT;
+ }
+
+ data->active_slave = 0;
+ data->channels = CPSW_MAX_QUEUES;
+ data->dual_emac = true;
+ data->bd_ram_size = CPSW_BD_RAM_SIZE;
+ data->mac_control = 0;
+
+ data->slave_data = devm_kcalloc(dev, CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave_data),
+ GFP_KERNEL);
+ if (!data->slave_data) {
+ of_node_put(tmp_node);
+ return -ENOMEM;
+ }
+
+ /* Populate all the child nodes here...
+ */
+ ret = devm_of_platform_populate(dev);
+	/* We do not want to force this, as in some cases there may be no child nodes */
+ if (ret)
+ dev_warn(dev, "Doesn't have any child node\n");
+
+ for_each_child_of_node(tmp_node, port_np) {
+ struct cpsw_slave_data *slave_data;
+ u32 port_id;
+
+ ret = of_property_read_u32(port_np, "reg", &port_id);
+ if (ret < 0) {
+ dev_err(dev, "%pOF error reading port_id %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (!port_id || port_id > CPSW_SLAVE_PORTS_NUM) {
+ dev_err(dev, "%pOF has invalid port_id %u\n",
+ port_np, port_id);
+ ret = -EINVAL;
+ goto err_node_put;
+ }
+
+ slave_data = &data->slave_data[port_id - 1];
+
+ slave_data->disabled = !of_device_is_available(port_np);
+ if (slave_data->disabled)
+ continue;
+
+ slave_data->slave_node = port_np;
+ slave_data->ifphy = devm_of_phy_get(dev, port_np, NULL);
+ if (IS_ERR(slave_data->ifphy)) {
+ ret = PTR_ERR(slave_data->ifphy);
+ dev_err(dev, "%pOF: Error retrieving port phy: %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ if (of_phy_is_fixed_link(port_np)) {
+ ret = of_phy_register_fixed_link(port_np);
+ if (ret) {
+ dev_err_probe(dev, ret, "%pOF failed to register fixed-link phy\n",
+ port_np);
+ goto err_node_put;
+ }
+ slave_data->phy_node = of_node_get(port_np);
+ } else {
+ slave_data->phy_node =
+ of_parse_phandle(port_np, "phy-handle", 0);
+ }
+
+ if (!slave_data->phy_node) {
+ dev_err(dev, "%pOF no phy found\n", port_np);
+ ret = -ENODEV;
+ goto err_node_put;
+ }
+
+ ret = of_get_phy_mode(port_np, &slave_data->phy_if);
+ if (ret) {
+ dev_err(dev, "%pOF read phy-mode err %d\n",
+ port_np, ret);
+ goto err_node_put;
+ }
+
+ ret = of_get_mac_address(port_np, slave_data->mac_addr);
+ if (ret) {
+ ret = ti_cm_get_macid(dev, port_id - 1,
+ slave_data->mac_addr);
+ if (ret)
+ goto err_node_put;
+ }
+
+ if (of_property_read_u32(port_np, "ti,dual-emac-pvid",
+ &prop)) {
+ dev_err(dev, "%pOF Missing dual_emac_res_vlan in DT.\n",
+ port_np);
+ slave_data->dual_emac_res_vlan = port_id;
+ dev_err(dev, "%pOF Using %d as Reserved VLAN\n",
+ port_np, slave_data->dual_emac_res_vlan);
+ } else {
+ slave_data->dual_emac_res_vlan = prop;
+ }
+ }
+
+ of_node_put(tmp_node);
+ return 0;
+
+err_node_put:
+ of_node_put(port_np);
+ of_node_put(tmp_node);
+ return ret;
+}
+
+static void cpsw_remove_dt(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+ struct device_node *port_np = slave_data->phy_node;
+
+ if (port_np) {
+ if (of_phy_is_fixed_link(port_np))
+ of_phy_deregister_fixed_link(port_np);
+
+ of_node_put(port_np);
+ }
+ }
+}
+
+static int cpsw_create_ports(struct cpsw_common *cpsw)
+{
+ struct cpsw_platform_data *data = &cpsw->data;
+ struct net_device *ndev, *napi_ndev = NULL;
+ struct device *dev = cpsw->dev;
+ struct cpsw_priv *priv;
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave_data *slave_data = &data->slave_data[i];
+
+ if (slave_data->disabled)
+ continue;
+
+ ndev = devm_alloc_etherdev_mqs(dev, sizeof(struct cpsw_priv),
+ CPSW_MAX_QUEUES,
+ CPSW_MAX_QUEUES);
+ if (!ndev) {
+ dev_err(dev, "error allocating net_device\n");
+ return -ENOMEM;
+ }
+
+ priv = netdev_priv(ndev);
+ priv->cpsw = cpsw;
+ priv->ndev = ndev;
+ priv->dev = dev;
+ priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
+ priv->emac_port = i + 1;
+ priv->tx_packet_min = CPSW_MIN_PACKET_SIZE;
+
+ if (is_valid_ether_addr(slave_data->mac_addr)) {
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+ dev_info(cpsw->dev, "Detected MACID = %pM\n",
+ priv->mac_addr);
+ } else {
+ eth_random_addr(slave_data->mac_addr);
+			dev_info(cpsw->dev, "Random MACID = %pM\n",
+				 slave_data->mac_addr);
+ }
+ eth_hw_addr_set(ndev, slave_data->mac_addr);
+ ether_addr_copy(priv->mac_addr, slave_data->mac_addr);
+
+ cpsw->slaves[i].ndev = ndev;
+
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER |
+ NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_NETNS_LOCAL | NETIF_F_HW_TC;
+
+ ndev->netdev_ops = &cpsw_netdev_ops;
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ if (!napi_ndev) {
+			/* The CPSW host port CPDMA interface is shared
+			 * between ports, and only one TX IRQ and one RX IRQ
+			 * are available for all possible TX and RX channels
+			 * respectively.
+			 */
+ netif_napi_add(ndev, &cpsw->napi_rx,
+ cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll);
+ netif_napi_add_tx(ndev, &cpsw->napi_tx,
+ cpsw->quirk_irq ?
+ cpsw_tx_poll : cpsw_tx_mq_poll);
+ }
+
+ napi_ndev = ndev;
+ }
+
+ return ret;
+}
+
+static void cpsw_unregister_ports(struct cpsw_common *cpsw)
+{
+ int i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ unregister_netdev(cpsw->slaves[i].ndev);
+ }
+}
+
+static int cpsw_register_ports(struct cpsw_common *cpsw)
+{
+ int ret = 0, i = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ if (!cpsw->slaves[i].ndev)
+ continue;
+
+ /* register the network device */
+ ret = register_netdev(cpsw->slaves[i].ndev);
+ if (ret) {
+ dev_err(cpsw->dev,
+ "cpsw: err registering net device%d\n", i);
+ cpsw->slaves[i].ndev = NULL;
+ break;
+ }
+ }
+
+ if (ret)
+ cpsw_unregister_ports(cpsw);
+ return ret;
+}
+
+bool cpsw_port_dev_check(const struct net_device *ndev)
+{
+ if (ndev->netdev_ops == &cpsw_netdev_ops) {
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+ return !cpsw->data.dual_emac;
+ }
+
+ return false;
+}
+
+static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw)
+{
+ int set_val = 0;
+ int i;
+
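+	/* offload_fwd_mark is set only when both ports are members of the
+	 * same bridge and the ALE is not in bypass mode
+	 */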
+ if (!cpsw->ale_bypass &&
+ (cpsw->br_members == (ALE_PORT_1 | ALE_PORT_2)))
+ set_val = 1;
+
+ dev_dbg(cpsw->dev, "set offload_fwd_mark %d\n", set_val);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *sl_ndev = cpsw->slaves[i].ndev;
+ struct cpsw_priv *priv = netdev_priv(sl_ndev);
+
+ priv->offload_fwd_mark = set_val;
+ }
+}
+
+static int cpsw_netdevice_port_link(struct net_device *ndev,
+ struct net_device *br_ndev,
+ struct netlink_ext_ack *extack)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int err;
+
+ if (!cpsw->br_members) {
+ cpsw->hw_bridge_dev = br_ndev;
+ } else {
+		/* Adding the port to a second bridge is unsupported */
+ if (cpsw->hw_bridge_dev != br_ndev)
+ return -EOPNOTSUPP;
+ }
+
+ err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
+ false, extack);
+ if (err)
+ return err;
+
+ cpsw->br_members |= BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ return NOTIFY_DONE;
+}
+
+static void cpsw_netdevice_port_unlink(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
+
+ cpsw->br_members &= ~BIT(priv->emac_port);
+
+ cpsw_port_offload_fwd_mark_update(cpsw);
+
+ if (!cpsw->br_members)
+ cpsw->hw_bridge_dev = NULL;
+}
+
+/* netdev notifier */
+static int cpsw_netdevice_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
+ struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+ struct netdev_notifier_changeupper_info *info;
+ int ret = NOTIFY_DONE;
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_CHANGEUPPER:
+ info = ptr;
+
+ if (netif_is_bridge_master(info->upper_dev)) {
+ if (info->linking)
+ ret = cpsw_netdevice_port_link(ndev,
+ info->upper_dev,
+ extack);
+ else
+ cpsw_netdevice_port_unlink(ndev);
+ }
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return notifier_from_errno(ret);
+}
+
+static struct notifier_block cpsw_netdevice_nb __read_mostly = {
+ .notifier_call = cpsw_netdevice_event,
+};
+
+static int cpsw_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_netdevice_notifier(&cpsw_netdevice_nb);
+ if (ret) {
+ dev_err(cpsw->dev, "can't register netdevice notifier\n");
+ return ret;
+ }
+
+ ret = cpsw_switchdev_register_notifiers(cpsw);
+ if (ret)
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+
+ return ret;
+}
+
+static void cpsw_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ cpsw_switchdev_unregister_notifiers(cpsw);
+ unregister_netdevice_notifier(&cpsw_netdevice_nb);
+}
+
+static const struct devlink_ops cpsw_devlink_ops = {
+};
+
+static int cpsw_dl_switch_mode_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ ctx->val.vbool = !cpsw->data.dual_emac;
+
+ return 0;
+}
+
+static int cpsw_dl_switch_mode_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int vlan = cpsw->data.default_vlan;
+ bool switch_en = ctx->val.vbool;
+ bool if_running = false;
+ int i;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ if (id != CPSW_DL_PARAM_SWITCH_MODE)
+ return -EOPNOTSUPP;
+
+ if (switch_en == !cpsw->data.dual_emac)
+ return 0;
+
+ if (!switch_en && cpsw->br_members) {
+ dev_err(cpsw->dev, "Remove ports from BR before disabling switch mode\n");
+ return -EINVAL;
+ }
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+
+ if (!sl_ndev || !netif_running(sl_ndev))
+ continue;
+
+ if_running = true;
+ }
+
+ if (!if_running) {
+ /* all ndevs are down */
+ cpsw->data.dual_emac = !switch_en;
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+
+ if (!sl_ndev)
+ continue;
+
+ if (switch_en)
+ vlan = cpsw->data.default_vlan;
+ else
+ vlan = slave->data->dual_emac_res_vlan;
+ slave->port_vlan = vlan;
+ }
+ goto exit;
+ }
+
+ if (switch_en) {
+ dev_info(cpsw->dev, "Enable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ /* clean up ALE table */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_switch(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(sl_ndev);
+ slave->port_vlan = vlan;
+ WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE_VLAN);
+ if (netif_running(sl_ndev))
+ cpsw_port_add_switch_def_ale_entries(priv,
+ slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = false;
+ } else {
+ dev_info(cpsw->dev, "Disable switch mode\n");
+
+ /* enable bypass - no forwarding; all traffic goes to Host */
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 1);
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_CLEAR, 1);
+ cpsw_ale_control_get(cpsw->ale, 0, ALE_AGEOUT);
+
+ cpsw_init_host_port_dual_mac(cpsw);
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ struct net_device *sl_ndev = slave->ndev;
+ struct cpsw_priv *priv;
+
+ if (!sl_ndev)
+ continue;
+
+ priv = netdev_priv(slave->ndev);
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ WRITE_ONCE(priv->tx_packet_min, CPSW_MIN_PACKET_SIZE);
+ cpsw_port_add_dual_emac_def_ale_entries(priv, slave);
+ }
+
+ cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS, 0);
+ cpsw->data.dual_emac = true;
+ }
+exit:
+ rtnl_unlock();
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ctx->val.vbool = cpsw_ale_control_get(cpsw->ale, 0, ALE_BYPASS);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_dl_ale_ctrl_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx)
+{
+ struct cpsw_devlink *dl_priv = devlink_priv(dl);
+ struct cpsw_common *cpsw = dl_priv->cpsw;
+ int ret = -EOPNOTSUPP;
+
+ dev_dbg(cpsw->dev, "%s id:%u\n", __func__, id);
+
+ switch (id) {
+ case CPSW_DL_PARAM_ALE_BYPASS:
+ ret = cpsw_ale_control_set(cpsw->ale, 0, ALE_BYPASS,
+ ctx->val.vbool);
+ if (!ret) {
+ cpsw->ale_bypass = ctx->val.vbool;
+ cpsw_port_offload_fwd_mark_update(cpsw);
+ }
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static const struct devlink_param cpsw_devlink_params[] = {
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_SWITCH_MODE,
+ "switch_mode", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_switch_mode_get, cpsw_dl_switch_mode_set,
+ NULL),
+ DEVLINK_PARAM_DRIVER(CPSW_DL_PARAM_ALE_BYPASS,
+ "ale_bypass", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_RUNTIME),
+ cpsw_dl_ale_ctrl_get, cpsw_dl_ale_ctrl_set, NULL),
+};
+
+static int cpsw_register_devlink(struct cpsw_common *cpsw)
+{
+ struct device *dev = cpsw->dev;
+ struct cpsw_devlink *dl_priv;
+ int ret = 0;
+
+ cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev);
+ if (!cpsw->devlink)
+ return -ENOMEM;
+
+ dl_priv = devlink_priv(cpsw->devlink);
+ dl_priv->cpsw = cpsw;
+
+ ret = devlink_params_register(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ if (ret) {
+ dev_err(dev, "DL params reg fail ret:%d\n", ret);
+ goto dl_unreg;
+ }
+
+ devlink_register(cpsw->devlink);
+ return ret;
+
+dl_unreg:
+ devlink_free(cpsw->devlink);
+ return ret;
+}
+
+static void cpsw_unregister_devlink(struct cpsw_common *cpsw)
+{
+ devlink_unregister(cpsw->devlink);
+ devlink_params_unregister(cpsw->devlink, cpsw_devlink_params,
+ ARRAY_SIZE(cpsw_devlink_params));
+ devlink_free(cpsw->devlink);
+}
+
+static const struct of_device_id cpsw_of_mtable[] = {
+ { .compatible = "ti,cpsw-switch"},
+ { .compatible = "ti,am335x-cpsw-switch"},
+ { .compatible = "ti,am4372-cpsw-switch"},
+ { .compatible = "ti,dra7-cpsw-switch"},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
+
+static const struct soc_device_attribute cpsw_soc_devices[] = {
+ { .family = "AM33xx", .revision = "ES1.0"},
+ { /* sentinel */ }
+};
+
+static int cpsw_probe(struct platform_device *pdev)
+{
+ const struct soc_device_attribute *soc;
+ struct device *dev = &pdev->dev;
+ struct cpsw_common *cpsw;
+ struct resource *ss_res;
+ struct gpio_descs *mode;
+ void __iomem *ss_regs;
+ int ret = 0, ch;
+ struct clk *clk;
+ int irq;
+
+ cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
+ if (!cpsw)
+ return -ENOMEM;
+
+ cpsw_slave_index = cpsw_slave_index_priv;
+
+ cpsw->dev = dev;
+
+ cpsw->slaves = devm_kcalloc(dev,
+ CPSW_SLAVE_PORTS_NUM,
+ sizeof(struct cpsw_slave),
+ GFP_KERNEL);
+ if (!cpsw->slaves)
+ return -ENOMEM;
+
+ mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
+ if (IS_ERR(mode)) {
+ ret = PTR_ERR(mode);
+ dev_err(dev, "gpio request failed, ret %d\n", ret);
+ return ret;
+ }
+
+ clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ dev_err(dev, "fck is not found %d\n", ret);
+ return ret;
+ }
+ cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
+
+ ss_regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ss_res);
+ if (IS_ERR(ss_regs)) {
+ ret = PTR_ERR(ss_regs);
+ return ret;
+ }
+ cpsw->regs = ss_regs;
+
+ irq = platform_get_irq_byname(pdev, "rx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[0] = irq;
+
+ irq = platform_get_irq_byname(pdev, "tx");
+ if (irq < 0)
+ return irq;
+ cpsw->irqs_table[1] = irq;
+
+ irq = platform_get_irq_byname(pdev, "misc");
+ if (irq <= 0)
+ return irq;
+ cpsw->misc_irq = irq;
+
+ platform_set_drvdata(pdev, cpsw);
+ /* This may be required here for child devices. */
+ pm_runtime_enable(dev);
+
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ ret = pm_runtime_resume_and_get(dev);
+ if (ret < 0) {
+ pm_runtime_disable(dev);
+ return ret;
+ }
+
+ ret = cpsw_probe_dt(cpsw);
+ if (ret)
+ goto clean_dt_ret;
+
+ soc = soc_device_match(cpsw_soc_devices);
+ if (soc)
+ cpsw->quirk_irq = true;
+
+ cpsw->rx_packet_max = rx_packet_max;
+ cpsw->descs_pool_size = descs_pool_size;
+ eth_random_addr(cpsw->base_mac);
+
+ ret = cpsw_init_common(cpsw, ss_regs, ale_ageout,
+ (u32 __force)ss_res->start + CPSW2_BD_OFFSET,
+ descs_pool_size);
+ if (ret)
+ goto clean_dt_ret;
+
+ cpsw->wr_regs = cpsw->version == CPSW_VERSION_1 ?
+ ss_regs + CPSW1_WR_OFFSET :
+ ss_regs + CPSW2_WR_OFFSET;
+
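+	/* with the single-IRQ quirk only channel 0 is used, otherwise the
+	 * default tx channel is the highest-numbered one (7)
+	 */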
+ ch = cpsw->quirk_irq ? 0 : 7;
+ cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
+ if (IS_ERR(cpsw->txv[0].ch)) {
+ dev_err(dev, "error initializing tx dma channel\n");
+ ret = PTR_ERR(cpsw->txv[0].ch);
+ goto clean_cpts;
+ }
+
+ cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
+ if (IS_ERR(cpsw->rxv[0].ch)) {
+ dev_err(dev, "error initializing rx dma channel\n");
+ ret = PTR_ERR(cpsw->rxv[0].ch);
+ goto clean_cpts;
+ }
+ cpsw_split_res(cpsw);
+
+ /* setup netdevs */
+ ret = cpsw_create_ports(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
+ * MISC IRQs which are always kept disabled with this driver so
+ * we will not request them.
+ *
+ * If anyone wants to implement support for those, make sure to
+ * first request and append them to irqs_table array.
+ */
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[0], cpsw_rx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ ret = devm_request_irq(dev, cpsw->irqs_table[1], cpsw_tx_interrupt,
+ 0, dev_name(dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ if (!cpsw->cpts)
+ goto skip_cpts;
+
+ ret = devm_request_irq(dev, cpsw->misc_irq, cpsw_misc_interrupt,
+ 0, dev_name(&pdev->dev), cpsw);
+ if (ret < 0) {
+ dev_err(dev, "error attaching misc irq (%d)\n", ret);
+ goto clean_unregister_netdev;
+ }
+
+ /* Enable misc CPTS evnt_pend IRQ */
+ cpts_set_irqpoll(cpsw->cpts, false);
+
+skip_cpts:
+ ret = cpsw_register_notifiers(cpsw);
+ if (ret)
+ goto clean_unregister_netdev;
+
+ ret = cpsw_register_devlink(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ ret = cpsw_register_ports(cpsw);
+ if (ret)
+ goto clean_unregister_notifiers;
+
+ dev_notice(dev, "initialized (regs %pa, pool size %d) hw_ver:%08X %d.%d (%d)\n",
+ &ss_res->start, descs_pool_size,
+ cpsw->version, CPSW_MAJOR_VERSION(cpsw->version),
+ CPSW_MINOR_VERSION(cpsw->version),
+ CPSW_RTL_VERSION(cpsw->version));
+
+ pm_runtime_put(dev);
+
+ return 0;
+
+clean_unregister_notifiers:
+ cpsw_unregister_notifiers(cpsw);
+clean_unregister_netdev:
+ cpsw_unregister_ports(cpsw);
+clean_cpts:
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+clean_dt_ret:
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(dev);
+ pm_runtime_disable(dev);
+ return ret;
+}
+
+static int cpsw_remove(struct platform_device *pdev)
+{
+ struct cpsw_common *cpsw = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_resume_and_get(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ cpsw_unregister_notifiers(cpsw);
+ cpsw_unregister_devlink(cpsw);
+ cpsw_unregister_ports(cpsw);
+
+ cpts_release(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ cpsw_remove_dt(cpsw);
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static int __maybe_unused cpsw_suspend(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *ndev = cpsw->slaves[i].ndev;
+
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ cpsw_ndo_stop(ndev);
+ }
+
+ rtnl_unlock();
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int __maybe_unused cpsw_resume(struct device *dev)
+{
+ struct cpsw_common *cpsw = dev_get_drvdata(dev);
+ int i;
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
+ rtnl_lock();
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct net_device *ndev = cpsw->slaves[i].ndev;
+
+ if (!(ndev && netif_running(ndev)))
+ continue;
+
+ cpsw_ndo_open(ndev);
+ }
+
+ rtnl_unlock();
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);
+
+static struct platform_driver cpsw_driver = {
+ .driver = {
+ .name = "cpsw-switch",
+ .pm = &cpsw_pm_ops,
+ .of_match_table = cpsw_of_mtable,
+ },
+ .probe = cpsw_probe,
+ .remove = cpsw_remove,
+};
+
+module_platform_driver(cpsw_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("TI CPSW switchdev Ethernet driver");
diff --git a/drivers/net/ethernet/ti/cpsw_priv.c b/drivers/net/ethernet/ti/cpsw_priv.c
new file mode 100644
index 000000000..758295c89
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_priv.c
@@ -0,0 +1,1582 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ */
+
+#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+#include <linux/kmemleak.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/net_tstamp.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/skbuff.h>
+#include <net/page_pool.h>
+#include <net/pkt_cls.h>
+
+#include "cpsw.h"
+#include "cpts.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_sl.h"
+#include "davinci_cpdma.h"
+
+#define CPTS_N_ETX_TS 4
+
+int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);
+
+void cpsw_intr_enable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, true);
+}
+
+void cpsw_intr_disable(struct cpsw_common *cpsw)
+{
+ writel_relaxed(0, &cpsw->wr_regs->tx_en);
+ writel_relaxed(0, &cpsw->wr_regs->rx_en);
+
+ cpdma_ctlr_int_ctrl(cpsw->dma, false);
+}
+
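+/* Completion handler for tx descriptors. The token is either an skb or a
+ * tagged xdp_frame handle: free the buffer accordingly, take a tx timestamp
+ * for skbs, wake the tx queue if it was stopped and update the stats.
+ */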
+void cpsw_tx_handler(void *token, int len, int status)
+{
+ struct cpsw_meta_xdp *xmeta;
+ struct xdp_frame *xdpf;
+ struct net_device *ndev;
+ struct netdev_queue *txq;
+ struct sk_buff *skb;
+ int ch;
+
+ if (cpsw_is_xdpf_handle(token)) {
+ xdpf = cpsw_handle_to_xdpf(token);
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ ndev = xmeta->ndev;
+ ch = xmeta->ch;
+ xdp_return_frame(xdpf);
+ } else {
+ skb = token;
+ ndev = skb->dev;
+ ch = skb_get_queue_mapping(skb);
+ cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
+ dev_kfree_skb_any(skb);
+ }
+
+ /* Check whether the queue is stopped due to stalled tx dma; if so,
+ * wake the queue now that we have free descriptors for tx.
+ */
+ txq = netdev_get_tx_queue(ndev, ch);
+ if (unlikely(netif_tx_queue_stopped(txq)))
+ netif_tx_wake_queue(txq);
+
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+}
+
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->tx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[1]);
+ cpsw->tx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_tx);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->rx_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
+
+ if (cpsw->quirk_irq) {
+ disable_irq_nosync(cpsw->irqs_table[0]);
+ cpsw->rx_irq_disabled = true;
+ }
+
+ napi_schedule(&cpsw->napi_rx);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
+{
+ struct cpsw_common *cpsw = dev_id;
+
+ writel(0, &cpsw->wr_regs->misc_en);
+ cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
+ cpts_misc_interrupt(cpsw->cpts);
+ writel(0x10, &cpsw->wr_regs->misc_en);
+
+ return IRQ_HANDLED;
+}
+
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *txv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+ for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
+ if (!(ch_map & 0x80))
+ continue;
+
+ txv = &cpsw->txv[ch];
+ if (unlikely(txv->budget > budget - num_tx))
+ cur_budget = budget - num_tx;
+ else
+ cur_budget = txv->budget;
+
+ num_tx += cpdma_chan_process(txv->ch, cur_budget);
+ if (num_tx >= budget)
+ break;
+ }
+
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ }
+
+ return num_tx;
+}
+
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
+ int num_tx;
+
+ num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
+ if (num_tx < budget) {
+ napi_complete(napi_tx);
+ writel(0xff, &cpsw->wr_regs->tx_en);
+ if (cpsw->tx_irq_disabled) {
+ cpsw->tx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[1]);
+ }
+ }
+
+ return num_tx;
+}
+
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx, cur_budget, ch;
+ u32 ch_map;
+ struct cpsw_vector *rxv;
+
+ /* process every unprocessed channel */
+ ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+ for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
+ if (!(ch_map & 0x01))
+ continue;
+
+ rxv = &cpsw->rxv[ch];
+ if (unlikely(rxv->budget > budget - num_rx))
+ cur_budget = budget - num_rx;
+ else
+ cur_budget = rxv->budget;
+
+ num_rx += cpdma_chan_process(rxv->ch, cur_budget);
+ if (num_rx >= budget)
+ break;
+ }
+
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ }
+
+ return num_rx;
+}
+
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
+{
+ struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
+ int num_rx;
+
+ num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
+ if (num_rx < budget) {
+ napi_complete_done(napi_rx, num_rx);
+ writel(0xff, &cpsw->wr_regs->rx_en);
+ if (cpsw->rx_irq_disabled) {
+ cpsw->rx_irq_disabled = false;
+ enable_irq(cpsw->irqs_table[0]);
+ }
+ }
+
+ return num_rx;
+}
+
+void cpsw_rx_vlan_encap(struct sk_buff *skb)
+{
+ struct cpsw_priv *priv = netdev_priv(skb->dev);
+ u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
+ struct cpsw_common *cpsw = priv->cpsw;
+ u16 vtag, vid, prio, pkt_type;
+
+ /* Remove VLAN header encapsulation word */
+ skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);
+
+ pkt_type = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
+ /* Ignore unknown & Priority-tagged packets */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
+ pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
+ return;
+
+ vid = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
+ VLAN_VID_MASK;
+ /* Ignore vid 0 and pass packet as is */
+ if (!vid)
+ return;
+
+ /* Untag P0 packets if set for vlan */
+ if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
+ prio = (rx_vlan_encap_hdr >>
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
+ CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;
+
+ vtag = (prio << VLAN_PRIO_SHIFT) | vid;
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
+ }
+
+ /* strip vlan tag for VLAN-tagged packet */
+ if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
+ memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
+ skb_pull(skb, VLAN_HLEN);
+ }
+}
+
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
+ slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
+}
+
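+/* Assert the module's soft-reset bit and poll until the hardware clears it,
+ * warning if the reset has not completed within one second.
+ */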
+void soft_reset(const char *module, void __iomem *reg)
+{
+ unsigned long timeout = jiffies + HZ;
+
+ writel_relaxed(1, reg);
+ do {
+ cpu_relax();
+ } while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));
+
+ WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
+}
+
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int ch;
+
+ cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
+ ndev->stats.tx_errors++;
+ cpsw_intr_disable(cpsw);
+ for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+ cpdma_chan_stop(cpsw->txv[ch].ch);
+ cpdma_chan_start(cpsw->txv[ch].ch);
+ }
+
+ cpsw_intr_enable(cpsw);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int cpsw_get_common_speed(struct cpsw_common *cpsw)
+{
+ int i, speed;
+
+ for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
+ speed += cpsw->slaves[i].phy->speed;
+
+ return speed;
+}
+
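+/* A budget re-split is only needed when the aggregate link speed has changed
+ * and only a subset (neither none nor all) of the tx channels is rate
+ * limited.
+ */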
+int cpsw_need_resplit(struct cpsw_common *cpsw)
+{
+ int i, rlim_ch_num;
+ int speed, ch_rate;
+
+ /* re-split resources only in case speed was changed */
+ speed = cpsw_get_common_speed(cpsw);
+ if (speed == cpsw->speed || !speed)
+ return 0;
+
+ cpsw->speed = speed;
+
+ for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
+ if (!ch_rate)
+ break;
+
+ rlim_ch_num++;
+ }
+
+ /* cases not dependent on speed */
+ if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
+ return 0;
+
+ return 1;
+}
+
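+/* Split the NAPI budget across channels: rate limited tx channels get a
+ * budget proportional to their rate (and a matching CPDMA weight), the
+ * remaining tx channels share the leftover equally, and rx channels split
+ * the budget evenly.
+ */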
+void cpsw_split_res(struct cpsw_common *cpsw)
+{
+ u32 consumed_rate = 0, bigest_rate = 0;
+ struct cpsw_vector *txv = cpsw->txv;
+ int i, ch_weight, rlim_ch_num = 0;
+ int budget, bigest_rate_ch = 0;
+ u32 ch_rate, max_rate;
+ int ch_budget = 0;
+
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (!ch_rate)
+ continue;
+
+ rlim_ch_num++;
+ consumed_rate += ch_rate;
+ }
+
+ if (cpsw->tx_ch_num == rlim_ch_num) {
+ max_rate = consumed_rate;
+ } else if (!rlim_ch_num) {
+ ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
+ bigest_rate = 0;
+ max_rate = consumed_rate;
+ } else {
+ max_rate = cpsw->speed * 1000;
+
+ /* if max_rate is less than expected due to reduced link speed,
+ * split proportionally according to the next potential max speed
+ */
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ if (max_rate < consumed_rate)
+ max_rate *= 10;
+
+ ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
+ ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ bigest_rate = (max_rate - consumed_rate) /
+ (cpsw->tx_ch_num - rlim_ch_num);
+ }
+
+ /* split tx weight/budget */
+ budget = NAPI_POLL_WEIGHT;
+ for (i = 0; i < cpsw->tx_ch_num; i++) {
+ ch_rate = cpdma_chan_get_rate(txv[i].ch);
+ if (ch_rate) {
+ txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
+ if (!txv[i].budget)
+ txv[i].budget++;
+ if (ch_rate > bigest_rate) {
+ bigest_rate_ch = i;
+ bigest_rate = ch_rate;
+ }
+
+ ch_weight = (ch_rate * 100) / max_rate;
+ if (!ch_weight)
+ ch_weight++;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
+ } else {
+ txv[i].budget = ch_budget;
+ if (!bigest_rate_ch)
+ bigest_rate_ch = i;
+ cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
+ }
+
+ budget -= txv[i].budget;
+ }
+
+ if (budget)
+ txv[bigest_rate_ch].budget += budget;
+
+ /* split rx budget */
+ budget = NAPI_POLL_WEIGHT;
+ ch_budget = budget / cpsw->rx_ch_num;
+ for (i = 0; i < cpsw->rx_ch_num; i++) {
+ cpsw->rxv[i].budget = ch_budget;
+ budget -= ch_budget;
+ }
+
+ if (budget)
+ cpsw->rxv[0].budget += budget;
+}
+
+int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
+ int ale_ageout, phys_addr_t desc_mem_phys,
+ int descs_pool_size)
+{
+ u32 slave_offset, sliver_offset, slave_size;
+ struct cpsw_ale_params ale_params;
+ struct cpsw_platform_data *data;
+ struct cpdma_params dma_params;
+ struct device *dev = cpsw->dev;
+ struct device_node *cpts_node;
+ void __iomem *cpts_regs;
+ int ret = 0, i;
+
+ data = &cpsw->data;
+ cpsw->rx_ch_num = 1;
+ cpsw->tx_ch_num = 1;
+
+ cpsw->version = readl(&cpsw->regs->id_ver);
+
+ memset(&dma_params, 0, sizeof(dma_params));
+ memset(&ale_params, 0, sizeof(ale_params));
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
+ cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
+ dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
+ slave_offset = CPSW1_SLAVE_OFFSET;
+ slave_size = CPSW1_SLAVE_SIZE;
+ sliver_offset = CPSW1_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = 0;
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ case CPSW_VERSION_4:
+ cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
+ cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
+ cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
+ dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
+ dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
+ ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
+ slave_offset = CPSW2_SLAVE_OFFSET;
+ slave_size = CPSW2_SLAVE_SIZE;
+ sliver_offset = CPSW2_SLIVER_OFFSET;
+ dma_params.desc_mem_phys = desc_mem_phys;
+ break;
+ default:
+ dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
+ return -ENODEV;
+ }
+
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ struct cpsw_slave *slave = &cpsw->slaves[i];
+ void __iomem *regs = cpsw->regs;
+
+ slave->slave_num = i;
+ slave->data = &cpsw->data.slave_data[i];
+ slave->regs = regs + slave_offset;
+ slave->port_vlan = slave->data->dual_emac_res_vlan;
+ slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
+ if (IS_ERR(slave->mac_sl))
+ return PTR_ERR(slave->mac_sl);
+
+ slave_offset += slave_size;
+ sliver_offset += SLIVER_SIZE;
+ }
+
+ ale_params.dev = dev;
+ ale_params.ale_ageout = ale_ageout;
+ ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
+ ale_params.dev_id = "cpsw";
+ ale_params.bus_freq = cpsw->bus_freq_mhz * 1000000;
+
+ cpsw->ale = cpsw_ale_create(&ale_params);
+ if (IS_ERR(cpsw->ale)) {
+ dev_err(dev, "error initializing ale engine\n");
+ return PTR_ERR(cpsw->ale);
+ }
+
+ dma_params.dev = dev;
+ dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
+ dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
+ dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
+ dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
+ dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
+
+ dma_params.num_chan = data->channels;
+ dma_params.has_soft_reset = true;
+ dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
+ dma_params.desc_mem_size = data->bd_ram_size;
+ dma_params.desc_align = 16;
+ dma_params.has_ext_regs = true;
+ dma_params.desc_hw_addr = dma_params.desc_mem_phys;
+ dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
+ dma_params.descs_pool_size = descs_pool_size;
+
+ cpsw->dma = cpdma_ctlr_create(&dma_params);
+ if (!cpsw->dma) {
+ dev_err(dev, "error initializing dma\n");
+ return -ENOMEM;
+ }
+
+ cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
+ if (!cpts_node)
+ cpts_node = cpsw->dev->of_node;
+
+ cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
+ CPTS_N_ETX_TS);
+ if (IS_ERR(cpsw->cpts)) {
+ ret = PTR_ERR(cpsw->cpts);
+ cpdma_ctlr_destroy(cpsw->dma);
+ }
+ of_node_put(cpts_node);
+
+ return ret;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ u32 ts_en, seq_id;
+
+ if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
+ slave_write(slave, 0, CPSW1_TS_CTL);
+ return;
+ }
+
+ seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
+
+ if (priv->tx_ts_enabled)
+ ts_en |= CPSW_V1_TS_TX_EN;
+
+ if (priv->rx_ts_enabled)
+ ts_en |= CPSW_V1_TS_RX_EN;
+
+ slave_write(slave, ts_en, CPSW1_TS_CTL);
+ slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
+}
+
+static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 ctrl, mtype;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+
+ ctrl = slave_read(slave, CPSW2_CONTROL);
+ switch (cpsw->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V2_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->tx_ts_enabled)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->rx_ts_enabled)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
+
+ mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
+
+ slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
+ slave_write(slave, ctrl, CPSW2_CONTROL);
+ writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
+ writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
+ return -ERANGE;
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ priv->rx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ return -ERANGE;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
+
+ switch (cpsw->version) {
+ case CPSW_VERSION_1:
+ cpsw_hwtstamp_v1(priv);
+ break;
+ case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
+ cpsw_hwtstamp_v2(priv);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ struct cpsw_common *cpsw = ndev_to_cpsw(dev);
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct hwtstamp_config cfg;
+
+ if (cpsw->version != CPSW_VERSION_1 &&
+ cpsw->version != CPSW_VERSION_2 &&
+ cpsw->version != CPSW_VERSION_3)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = priv->rx_ts_enabled;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+#else
+static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
+{
+ return -EOPNOTSUPP;
+}
+#endif /*CONFIG_TI_CPTS*/
+
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+ struct cpsw_priv *priv = netdev_priv(dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int slave_no = cpsw_slave_index(cpsw, priv);
+ struct phy_device *phy;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ phy = cpsw->slaves[slave_no].phy;
+
+ if (!phy_has_hwtstamp(phy)) {
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return cpsw_hwtstamp_set(dev, req);
+ case SIOCGHWTSTAMP:
+ return cpsw_hwtstamp_get(dev, req);
+ }
+ }
+
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 min_rate;
+ u32 ch_rate;
+ int i, ret;
+
+ ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
+ if (ch_rate == rate)
+ return 0;
+
+ ch_rate = rate * 1000;
+ min_rate = cpdma_chan_get_min_rate(cpsw->dma);
+ if ((ch_rate < min_rate && ch_rate)) {
+ dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
+ min_rate);
+ return -EINVAL;
+ }
+
+ if (rate > cpsw->speed) {
+ dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
+ return -EINVAL;
+ }
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
+ pm_runtime_put(cpsw->dev);
+
+ if (ret)
+ return ret;
+
+ /* update rates for slaves tx queues */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ slave = &cpsw->slaves[i];
+ if (!slave->ndev)
+ continue;
+
+ netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
+ }
+
+ cpsw_split_res(cpsw);
+ return ret;
+}
+
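+/* Map a traffic class to a tx FIFO: the last tc always uses FIFO0, which
+ * cannot be shaped, while the remaining classes map to the shaper FIFOs in
+ * reverse order.
+ */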
+static int cpsw_tc_to_fifo(int tc, int num_tc)
+{
+ if (tc == num_tc - 1)
+ return 0;
+
+ return CPSW_FIFO_SHAPERS_NUM - tc;
+}
+
+bool cpsw_shp_is_off(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = 7 << shift;
+ val = val & mask;
+
+ return !val;
+}
+
+static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 shift, mask, val;
+
+ val = readl_relaxed(&cpsw->regs->ptype);
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
+ mask = (1 << --fifo) << shift;
+ val = on ? val | mask : val & ~mask;
+
+ writel_relaxed(val, &cpsw->regs->ptype);
+}
+
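+/* Program the per-FIFO send percentage: each FIFO owns an 8-bit field in the
+ * SEND_PERCENT register and the requested bandwidth is converted into a
+ * percentage of the configured shaper link speed. The percentages summed
+ * over all shaped FIFOs must stay below 100.
+ */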
+static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 val = 0, send_pct, shift;
+ struct cpsw_slave *slave;
+ int pct = 0, i;
+
+ if (bw > priv->shp_cfg_speed * 1000)
+ goto err;
+
+ /* shaping has to stay enabled for the highest fifos linearly,
+ * and fifo bw can be no more than the interface can allow
+ */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ send_pct = slave_read(slave, SEND_PERCENT);
+ for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
+ if (!bw) {
+ if (i >= fifo || !priv->fifo_bw[i])
+ continue;
+
+ dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
+ continue;
+ }
+
+ if (!priv->fifo_bw[i] && i > fifo) {
+ dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
+ return -EINVAL;
+ }
+
+ shift = (i - 1) * 8;
+ if (i == fifo) {
+ send_pct &= ~(CPSW_PCT_MASK << shift);
+ val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
+ if (!val)
+ val = 1;
+
+ send_pct |= val << shift;
+ pct += val;
+ continue;
+ }
+
+ if (priv->fifo_bw[i])
+ pct += (send_pct >> shift) & CPSW_PCT_MASK;
+ }
+
+ if (pct >= 100)
+ goto err;
+
+ slave_write(slave, send_pct, SEND_PERCENT);
+ priv->fifo_bw[fifo] = bw;
+
+ dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
+ DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
+
+ return 0;
+err:
+ dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
+ return -EINVAL;
+}
+
+static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ u32 tx_in_ctl_rg, val;
+ int ret;
+
+ ret = cpsw_set_fifo_bw(priv, fifo, bw);
+ if (ret)
+ return ret;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
+
+ if (!bw)
+ cpsw_fifo_shp_on(priv, fifo, bw);
+
+ val = slave_read(slave, tx_in_ctl_rg);
+ if (cpsw_shp_is_off(priv)) {
+ /* disable FIFOs rate limited queues */
+ val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
+
+ /* set type of FIFO queues to normal priority mode */
+ val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
+
+ /* set type of FIFO queues to be rate limited */
+ if (bw)
+ val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
+ else
+ priv->shp_cfg_speed = 0;
+ }
+
+ /* toggle a FIFO rate limited queue */
+ if (bw)
+ val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ else
+ val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
+ slave_write(slave, val, tx_in_ctl_rg);
+
+ /* FIFO transmit shape enable */
+ cpsw_fifo_shp_on(priv, fifo, bw);
+ return 0;
+}
+
+/* Defaults:
+ * class A - prio 3
+ * class B - prio 2
+ * shaping for class A should be set first
+ */
+static int cpsw_set_cbs(struct net_device *ndev,
+ struct tc_cbs_qopt_offload *qopt)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int prev_speed = 0;
+ int tc, ret, fifo;
+ u32 bw = 0;
+
+ tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
+
+ /* enable channels in backward order, as the highest FIFOs must be
+ * rate limited first and, for consistency with CPDMA rate limited
+ * channels, are also used in backward order. FIFO0 cannot be rate
+ * limited.
+ */
+ fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
+ if (!fifo) {
+ dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
+ return -EINVAL;
+ }
+
+ /* do nothing, it's disabled anyway */
+ if (!qopt->enable && !priv->fifo_bw[fifo])
+ return 0;
+
+ /* shapers can be set if link speed is known */
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ if (slave->phy && slave->phy->link) {
+ if (priv->shp_cfg_speed &&
+ priv->shp_cfg_speed != slave->phy->speed)
+ prev_speed = priv->shp_cfg_speed;
+
+ priv->shp_cfg_speed = slave->phy->speed;
+ }
+
+ if (!priv->shp_cfg_speed) {
+ dev_err(priv->dev, "Link speed is not known");
+ return -1;
+ }
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ bw = qopt->enable ? qopt->idleslope : 0;
+ ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
+ if (ret) {
+ priv->shp_cfg_speed = prev_speed;
+ prev_speed = 0;
+ }
+
+ if (bw && prev_speed)
+ dev_warn(priv->dev,
+ "Speed was changed, CBS shaper speeds are changed!");
+
+ pm_runtime_put_sync(cpsw->dev);
+ return ret;
+}
+
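+/* Offload mqprio by programming the per-port TX_PRI_MAP register so that
+ * each of the eight packet priorities is steered to the FIFO derived from
+ * its traffic class; when hw offload is disabled the default priority map is
+ * restored.
+ */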
+static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
+{
+ struct tc_mqprio_qopt_offload *mqprio = type_data;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int fifo, num_tc, count, offset;
+ struct cpsw_slave *slave;
+ u32 tx_prio_map = 0;
+ int i, tc, ret;
+
+ num_tc = mqprio->qopt.num_tc;
+ if (num_tc > CPSW_TC_NUM)
+ return -EINVAL;
+
+ if (mqprio->mode != TC_MQPRIO_MODE_DCB)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(cpsw->dev);
+ if (ret < 0)
+ return ret;
+
+ if (num_tc) {
+ for (i = 0; i < 8; i++) {
+ tc = mqprio->qopt.prio_tc_map[i];
+ fifo = cpsw_tc_to_fifo(tc, num_tc);
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ netdev_set_num_tc(ndev, num_tc);
+ for (i = 0; i < num_tc; i++) {
+ count = mqprio->qopt.count[i];
+ offset = mqprio->qopt.offset[i];
+ netdev_set_tc_queue(ndev, i, count, offset);
+ }
+ }
+
+ if (!mqprio->qopt.hw) {
+ /* restore default configuration */
+ netdev_reset_tc(ndev);
+ tx_prio_map = TX_PRIORITY_MAPPING;
+ }
+
+ priv->mqprio_hw = mqprio->qopt.hw;
+
+ offset = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
+ slave_write(slave, tx_prio_map, offset);
+
+ pm_runtime_put_sync(cpsw->dev);
+
+ return 0;
+}
+
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);
+
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data)
+{
+ switch (type) {
+ case TC_SETUP_QDISC_CBS:
+ return cpsw_set_cbs(ndev, type_data);
+
+ case TC_SETUP_QDISC_MQPRIO:
+ return cpsw_set_mqprio(ndev, type_data);
+
+ case TC_SETUP_BLOCK:
+ return cpsw_qos_setup_tc_block(ndev, type_data);
+
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ int fifo, bw;
+
+ for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
+ bw = priv->fifo_bw[fifo];
+ if (!bw)
+ continue;
+
+ cpsw_set_fifo_rlimit(priv, fifo, bw);
+ }
+}
+
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 tx_prio_map = 0;
+ int i, tc, fifo;
+ u32 tx_prio_rg;
+
+ if (!priv->mqprio_hw)
+ return;
+
+ for (i = 0; i < 8; i++) {
+ tc = netdev_get_prio_tc_map(priv->ndev, i);
+ fifo = CPSW_FIFO_SHAPERS_NUM - tc;
+ tx_prio_map |= fifo << (4 * i);
+ }
+
+ tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
+ CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
+
+ slave_write(slave, tx_prio_map, tx_prio_rg);
+}
+
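+/* Pre-fill every rx channel with page-pool backed buffers. Each page carries
+ * the xdp metadata (ndev, channel) in its headroom and is submitted to the
+ * CPDMA channel while it is still idle.
+ */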
+int cpsw_fill_rx_channels(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct page_pool *pool;
+ struct page *page;
+ int ch_buf_num;
+ int ch, i, ret;
+ dma_addr_t dma;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ pool = cpsw->page_pool[ch];
+ ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ for (i = 0; i < ch_buf_num; i++) {
+ page = page_pool_dev_alloc_pages(pool);
+ if (!page) {
+ cpsw_err(priv, ifup, "allocate rx page err\n");
+ return -ENOMEM;
+ }
+
+ xmeta = page_address(page) + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = ch;
+
+ dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
+ ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
+ page, dma,
+ cpsw->rx_packet_max,
+ 0);
+ if (ret < 0) {
+ cpsw_err(priv, ifup,
+ "cannot submit page to channel %d rx, error %d\n",
+ ch, ret);
+ page_pool_recycle_direct(pool, page);
+ return ret;
+ }
+ }
+
+ cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+ ch, ch_buf_num);
+ }
+
+ return 0;
+}
+
+static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
+ int size)
+{
+ struct page_pool_params pp_params = {};
+ struct page_pool *pool;
+
+ pp_params.order = 0;
+ pp_params.flags = PP_FLAG_DMA_MAP;
+ pp_params.pool_size = size;
+ pp_params.nid = NUMA_NO_NODE;
+ pp_params.dma_dir = DMA_BIDIRECTIONAL;
+ pp_params.dev = cpsw->dev;
+
+ pool = page_pool_create(&pp_params);
+ if (IS_ERR(pool))
+ dev_err(cpsw->dev, "cannot create rx page pool\n");
+
+ return pool;
+}
+
+static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
+{
+ struct page_pool *pool;
+ int ret = 0, pool_size;
+
+ pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
+ pool = cpsw_create_page_pool(cpsw, pool_size);
+ if (IS_ERR(pool))
+ ret = PTR_ERR(pool);
+ else
+ cpsw->page_pool[ch] = pool;
+
+ return ret;
+}
+
+static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct xdp_rxq_info *rxq;
+ struct page_pool *pool;
+ int ret;
+
+ pool = cpsw->page_pool[ch];
+ rxq = &priv->xdp_rxq[ch];
+
+ ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
+ if (ret)
+ return ret;
+
+ ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
+ if (ret)
+ xdp_rxq_info_unreg(rxq);
+
+ return ret;
+}
+
+static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
+{
+ struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];
+
+ if (!xdp_rxq_info_is_reg(rxq))
+ return;
+
+ xdp_rxq_info_unreg(rxq);
+}
+
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
+ }
+
+ page_pool_destroy(cpsw->page_pool[ch]);
+ cpsw->page_pool[ch] = NULL;
+ }
+}
+
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
+{
+ struct net_device *ndev;
+ int i, ch, ret;
+
+ for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+ ret = cpsw_create_rx_pool(cpsw, ch);
+ if (ret)
+ goto err_cleanup;
+
+ /* sharing the same page pool is allowed because rx handlers never
+ * run simultaneously for both ndevs
+ */
+ for (i = 0; i < cpsw->data.slaves; i++) {
+ ndev = cpsw->slaves[i].ndev;
+ if (!ndev)
+ continue;
+
+ ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
+ if (ret)
+ goto err_cleanup;
+ }
+ }
+
+ return 0;
+
+err_cleanup:
+ cpsw_destroy_xdp_rxqs(cpsw);
+
+ return ret;
+}
+
+static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
+{
+ struct bpf_prog *prog = bpf->prog;
+
+ if (!priv->xdpi.prog && !prog)
+ return 0;
+
+ WRITE_ONCE(priv->xdp_prog, prog);
+
+ xdp_attachment_setup(&priv->xdpi, bpf);
+
+ return 0;
+}
+
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ return cpsw_xdp_prog_setup(priv, bpf);
+
+ default:
+ return -EINVAL;
+ }
+}
+
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_meta_xdp *xmeta;
+ struct cpdma_chan *txch;
+ dma_addr_t dma;
+ int ret;
+
+ xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
+ xmeta->ndev = priv->ndev;
+ xmeta->ch = 0;
+ txch = cpsw->txv[0].ch;
+
+ if (page) {
+ dma = page_pool_get_dma_addr(page);
+ dma += xdpf->headroom + sizeof(struct xdp_frame);
+ ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
+ dma, xdpf->len, port);
+ } else {
+ if (sizeof(*xmeta) > xdpf->headroom)
+ return -EINVAL;
+
+ ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
+ xdpf->data, xdpf->len, port);
+ }
+
+ if (ret)
+ priv->ndev->stats.tx_dropped++;
+
+ return ret;
+}
+
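+/* Run the attached XDP program on a received buffer and act on the verdict:
+ * PASS lets the caller hand the buffer to the network stack, TX sends it
+ * back out of the switch port, REDIRECT forwards it to another device, and
+ * anything else drops the packet and recycles the page.
+ */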
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port, int *len)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct net_device *ndev = priv->ndev;
+ int ret = CPSW_XDP_CONSUMED;
+ struct xdp_frame *xdpf;
+ struct bpf_prog *prog;
+ u32 act;
+
+ prog = READ_ONCE(priv->xdp_prog);
+ if (!prog)
+ return CPSW_XDP_PASS;
+
+ act = bpf_prog_run_xdp(prog, xdp);
+ /* XDP prog might have changed packet data and boundaries */
+ *len = xdp->data_end - xdp->data;
+
+ switch (act) {
+ case XDP_PASS:
+ ret = CPSW_XDP_PASS;
+ goto out;
+ case XDP_TX:
+ xdpf = xdp_convert_buff_to_frame(xdp);
+ if (unlikely(!xdpf))
+ goto drop;
+
+ if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
+ xdp_return_frame_rx_napi(xdpf);
+ break;
+ case XDP_REDIRECT:
+ if (xdp_do_redirect(ndev, xdp, prog))
+ goto drop;
+
+ /* Have to flush here, per packet, instead of doing it in bulk
+ * at the end of the napi handler. The RX devices on this
+ * particular hardware share a common queue, so the
+ * incoming device might change per packet.
+ */
+ xdp_do_flush_map();
+ break;
+ default:
+ bpf_warn_invalid_xdp_action(ndev, prog, act);
+ fallthrough;
+ case XDP_ABORTED:
+ trace_xdp_exception(ndev, prog, act);
+ fallthrough; /* handle aborts by dropping packet */
+ case XDP_DROP:
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+ goto drop;
+ }
+
+ ndev->stats.rx_bytes += *len;
+ ndev->stats.rx_packets++;
+out:
+ return ret;
+drop:
+ page_pool_recycle_direct(cpsw->page_pool[ch], page);
+ return ret;
+}
+
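+/* Only two policer matches can be offloaded to the ALE: an exact broadcast
+ * destination (broadcast rate limiting) and the multicast address bit alone
+ * (multicast rate limiting), both expressed in packets per second per port.
+ */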
+static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
+ struct netlink_ext_ack *extack,
+ struct flow_cls_offload *cls,
+ u64 rate_pkt_ps)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct flow_dissector *dissector = rule->match.dissector;
+ static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
+ struct flow_match_eth_addrs match;
+ u32 port_id;
+ int ret;
+
+ if (dissector->used_keys &
+ ~(BIT(FLOW_DISSECTOR_KEY_BASIC) |
+ BIT(FLOW_DISSECTOR_KEY_CONTROL) |
+ BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Unsupported keys used");
+ return -EOPNOTSUPP;
+ }
+
+ if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+ NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
+ return -EOPNOTSUPP;
+ }
+
+ flow_rule_match_eth_addrs(rule, &match);
+
+ if (!is_zero_ether_addr(match.mask->src)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Matching on source MAC not supported");
+ return -EOPNOTSUPP;
+ }
+
+ port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (is_broadcast_ether_addr(match.key->dst) &&
+ is_broadcast_ether_addr(match.mask->dst)) {
+ ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_bc_ratelimit.cookie = cls->cookie;
+ priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
+ ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
+ ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
+ if (ret)
+ return ret;
+
+ priv->ale_mc_ratelimit.cookie = cls->cookie;
+ priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
+ } else {
+ NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
+ const struct flow_action_entry *act,
+ struct netlink_ext_ack *extack)
+{
+ if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when exceed action is not drop");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
+ act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is not pipe or ok");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
+ !flow_action_is_last_entry(action, act)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when conform action is ok, but action is not last");
+ return -EOPNOTSUPP;
+ }
+
+ if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
+ act->police.avrate || act->police.overhead) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
+ return -EOPNOTSUPP;
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
+ struct netlink_ext_ack *extack = cls->common.extack;
+ const struct flow_action_entry *act;
+ int i, ret;
+
+ flow_action_for_each(i, act, &rule->action) {
+ switch (act->id) {
+ case FLOW_ACTION_POLICE:
+ ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
+ if (ret)
+ return ret;
+
+ return cpsw_qos_clsflower_add_policer(priv, extack, cls,
+ act->police.rate_pkt_ps);
+ default:
+ NL_SET_ERR_MSG_MOD(extack, "Action not supported");
+ return -EOPNOTSUPP;
+ }
+ }
+ return -EOPNOTSUPP;
+}
+
+static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
+ priv->ale_bc_ratelimit.cookie = 0;
+ priv->ale_bc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
+ }
+
+ if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
+ priv->ale_mc_ratelimit.cookie = 0;
+ priv->ale_mc_ratelimit.rate_packet_ps = 0;
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
+ }
+
+ return 0;
+}
+
+static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
+{
+ switch (cls_flower->command) {
+ case FLOW_CLS_REPLACE:
+ return cpsw_qos_configure_clsflower(priv, cls_flower);
+ case FLOW_CLS_DESTROY:
+ return cpsw_qos_delete_clsflower(priv, cls_flower);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ struct cpsw_priv *priv = cb_priv;
+ int ret;
+
+ if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
+ return -EOPNOTSUPP;
+
+ ret = pm_runtime_get_sync(priv->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(priv->dev);
+ return ret;
+ }
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ pm_runtime_put(priv->dev);
+ return ret;
+}
+
+static LIST_HEAD(cpsw_qos_block_cb_list);
+
+static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+
+ return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
+ cpsw_qos_setup_tc_block_cb,
+ priv, priv, true);
+}
+
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
+{
+ u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;
+
+ if (priv->ale_bc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
+ priv->ale_bc_ratelimit.rate_packet_ps);
+
+ if (priv->ale_mc_ratelimit.cookie)
+ cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
+ priv->ale_mc_ratelimit.rate_packet_ps);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h
new file mode 100644
index 000000000..34230145c
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_priv.h
@@ -0,0 +1,514 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_
+
+#include <uapi/linux/bpf.h>
+
+#include "davinci_cpdma.h"
+
+#define CPSW_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
+ NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
+ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_RX_STATUS)
+
+#define cpsw_info(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_info(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_err(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_err(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_dbg(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_dbg(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define cpsw_notice(priv, type, format, ...) \
+do { \
+ if (netif_msg_##type(priv) && net_ratelimit()) \
+ dev_notice(priv->dev, format, ## __VA_ARGS__); \
+} while (0)
+
+#define ALE_ALL_PORTS 0x7
+
+#define CPSW_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
+#define CPSW_MINOR_VERSION(reg) (reg & 0xff)
+#define CPSW_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+
+#define CPSW_VERSION_1 0x19010a
+#define CPSW_VERSION_2 0x19010c
+#define CPSW_VERSION_3 0x19010f
+#define CPSW_VERSION_4 0x190112
+
+#define HOST_PORT_NUM 0
+#define CPSW_ALE_PORTS_NUM 3
+#define CPSW_SLAVE_PORTS_NUM 2
+#define SLIVER_SIZE 0x40
+
+#define CPSW1_HOST_PORT_OFFSET 0x028
+#define CPSW1_SLAVE_OFFSET 0x050
+#define CPSW1_SLAVE_SIZE 0x040
+#define CPSW1_CPDMA_OFFSET 0x100
+#define CPSW1_STATERAM_OFFSET 0x200
+#define CPSW1_HW_STATS 0x400
+#define CPSW1_CPTS_OFFSET 0x500
+#define CPSW1_ALE_OFFSET 0x600
+#define CPSW1_SLIVER_OFFSET 0x700
+#define CPSW1_WR_OFFSET 0x900
+
+#define CPSW2_HOST_PORT_OFFSET 0x108
+#define CPSW2_SLAVE_OFFSET 0x200
+#define CPSW2_SLAVE_SIZE 0x100
+#define CPSW2_CPDMA_OFFSET 0x800
+#define CPSW2_HW_STATS 0x900
+#define CPSW2_STATERAM_OFFSET 0xa00
+#define CPSW2_CPTS_OFFSET 0xc00
+#define CPSW2_ALE_OFFSET 0xd00
+#define CPSW2_SLIVER_OFFSET 0xd80
+#define CPSW2_BD_OFFSET 0x2000
+#define CPSW2_WR_OFFSET 0x1200
+
+#define CPDMA_RXTHRESH 0x0c0
+#define CPDMA_RXFREE 0x0e0
+#define CPDMA_TXHDP 0x00
+#define CPDMA_RXHDP 0x20
+#define CPDMA_TXCP 0x40
+#define CPDMA_RXCP 0x60
+
+#define CPSW_RX_VLAN_ENCAP_HDR_SIZE 4
+#define CPSW_MIN_PACKET_SIZE_VLAN (VLAN_ETH_ZLEN)
+#define CPSW_MIN_PACKET_SIZE (ETH_ZLEN)
+#define CPSW_MAX_PACKET_SIZE (VLAN_ETH_FRAME_LEN +\
+ ETH_FCS_LEN +\
+ CPSW_RX_VLAN_ENCAP_HDR_SIZE)
+
+#define RX_PRIORITY_MAPPING 0x76543210
+#define TX_PRIORITY_MAPPING 0x33221100
+#define CPDMA_TX_PRIORITY_MAP 0x76543210
+
+#define CPSW_VLAN_AWARE BIT(1)
+#define CPSW_RX_VLAN_ENCAP BIT(2)
+#define CPSW_ALE_VLAN_AWARE 1
+
+#define CPSW_FIFO_NORMAL_MODE (0 << 16)
+#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
+#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
+
+#define CPSW_INTPACEEN (0x3f << 16)
+#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
+#define CPSW_CMINTMAX_CNT 63
+#define CPSW_CMINTMIN_CNT 2
+#define CPSW_CMINTMAX_INTVL (1000 / CPSW_CMINTMIN_CNT)
+#define CPSW_CMINTMIN_INTVL ((1000 / CPSW_CMINTMAX_CNT) + 1)
+
+#define IRQ_NUM 2
+#define CPSW_MAX_QUEUES 8
+#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
+#define CPSW_ALE_AGEOUT_DEFAULT 10 /* sec */
+#define CPSW_FIFO_QUEUE_TYPE_SHIFT 16
+#define CPSW_FIFO_SHAPE_EN_SHIFT 16
+#define CPSW_FIFO_RATE_EN_SHIFT 20
+#define CPSW_TC_NUM 4
+#define CPSW_FIFO_SHAPERS_NUM (CPSW_TC_NUM - 1)
+#define CPSW_PCT_MASK 0x7f
+#define CPSW_BD_RAM_SIZE 0x2000
+
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT 29
+#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK GENMASK(2, 0)
+#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT 16
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT 8
+#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK GENMASK(1, 0)
+enum {
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
+ CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
+};
+
+struct cpsw_wr_regs {
+ u32 id_ver;
+ u32 soft_reset;
+ u32 control;
+ u32 int_control;
+ u32 rx_thresh_en;
+ u32 rx_en;
+ u32 tx_en;
+ u32 misc_en;
+ u32 mem_allign1[8];
+ u32 rx_thresh_stat;
+ u32 rx_stat;
+ u32 tx_stat;
+ u32 misc_stat;
+ u32 mem_allign2[8];
+ u32 rx_imax;
+ u32 tx_imax;
+
+};
+
+struct cpsw_ss_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 vlan_ltype;
+ u32 ts_ltype;
+ u32 dlr_ltype;
+};
+
+/* CPSW_PORT_V1 */
+#define CPSW1_MAX_BLKS 0x00 /* Maximum FIFO Blocks */
+#define CPSW1_BLK_CNT 0x04 /* FIFO Block Usage Count (Read Only) */
+#define CPSW1_TX_IN_CTL 0x08 /* Transmit FIFO Control */
+#define CPSW1_PORT_VLAN 0x0c /* VLAN Register */
+#define CPSW1_TX_PRI_MAP 0x10 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW1_TS_CTL 0x14 /* Time Sync Control */
+#define CPSW1_TS_SEQ_LTYPE 0x18 /* Time Sync Sequence ID Offset and Msg Type */
+#define CPSW1_TS_VLAN 0x1c /* Time Sync VLAN1 and VLAN2 */
+
+/* CPSW_PORT_V2 */
+#define CPSW2_CONTROL 0x00 /* Control Register */
+#define CPSW2_MAX_BLKS 0x08 /* Maximum FIFO Blocks */
+#define CPSW2_BLK_CNT 0x0c /* FIFO Block Usage Count (Read Only) */
+#define CPSW2_TX_IN_CTL 0x10 /* Transmit FIFO Control */
+#define CPSW2_PORT_VLAN 0x14 /* VLAN Register */
+#define CPSW2_TX_PRI_MAP 0x18 /* Tx Header Priority to Switch Pri Mapping */
+#define CPSW2_TS_SEQ_MTYPE 0x1c /* Time Sync Sequence ID Offset and Msg Type */
+
+/* CPSW_PORT_V1 and V2 */
+#define SA_LO 0x20 /* CPGMAC_SL Source Address Low */
+#define SA_HI 0x24 /* CPGMAC_SL Source Address High */
+#define SEND_PERCENT 0x28 /* Transmit Queue Send Percentages */
+
+/* CPSW_PORT_V2 only */
+#define RX_DSCP_PRI_MAP0 0x30 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP1 0x34 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP2 0x38 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP3 0x3c /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP4 0x40 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP5 0x44 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP6 0x48 /* Rx DSCP Priority to Rx Packet Mapping */
+#define RX_DSCP_PRI_MAP7 0x4c /* Rx DSCP Priority to Rx Packet Mapping */
+
+/* Bit definitions for the CPSW2_CONTROL register */
+#define PASS_PRI_TAGGED BIT(24) /* Pass Priority Tagged */
+#define VLAN_LTYPE2_EN BIT(21) /* VLAN LTYPE 2 enable */
+#define VLAN_LTYPE1_EN BIT(20) /* VLAN LTYPE 1 enable */
+#define DSCP_PRI_EN BIT(16) /* DSCP Priority Enable */
+#define TS_107 BIT(15) /* Time Sync Dest IP Address 107 */
+#define TS_320 BIT(14) /* Time Sync Dest Port 320 enable */
+#define TS_319 BIT(13) /* Time Sync Dest Port 319 enable */
+#define TS_132 BIT(12) /* Time Sync Dest IP Addr 132 enable */
+#define TS_131 BIT(11) /* Time Sync Dest IP Addr 131 enable */
+#define TS_130 BIT(10) /* Time Sync Dest IP Addr 130 enable */
+#define TS_129 BIT(9) /* Time Sync Dest IP Addr 129 enable */
+#define TS_TTL_NONZERO BIT(8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN BIT(6) /* Time Sync Annex F enable */
+#define TS_ANNEX_D_EN BIT(4) /* Time Sync Annex D enable */
+#define TS_LTYPE2_EN BIT(3) /* Time Sync LTYPE 2 enable */
+#define TS_LTYPE1_EN BIT(2) /* Time Sync LTYPE 1 enable */
+#define TS_TX_EN BIT(1) /* Time Sync Transmit Enable */
+#define TS_RX_EN BIT(0) /* Time Sync Receive Enable */
+
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
+
+#define CTRL_V3_TS_BITS \
+ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN | VLAN_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
+
+/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
+#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
+#define TS_SEQ_ID_OFFSET_MASK (0x3f)
+#define TS_MSG_TYPE_EN_SHIFT (0) /* Time Sync Message Type Enable */
+#define TS_MSG_TYPE_EN_MASK (0xffff)
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
+
+/* Bit definitions for the CPSW1_TS_CTL register */
+#define CPSW_V1_TS_RX_EN BIT(0)
+#define CPSW_V1_TS_TX_EN BIT(4)
+#define CPSW_V1_MSG_TYPE_OFS 16
+
+/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
+#define CPSW_V1_SEQ_ID_OFS_SHIFT 16
+
+#define CPSW_MAX_BLKS_TX 15
+#define CPSW_MAX_BLKS_TX_SHIFT 4
+#define CPSW_MAX_BLKS_RX 5
+
+struct cpsw_host_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 tx_in_ctl;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 cpdma_tx_pri_map;
+ u32 cpdma_rx_chan_map;
+};
+
+struct cpsw_slave_data {
+ struct device_node *slave_node;
+ struct device_node *phy_node;
+ char phy_id[MII_BUS_ID_SIZE];
+ phy_interface_t phy_if;
+ u8 mac_addr[ETH_ALEN];
+ u16 dual_emac_res_vlan; /* Reserved VLAN for DualEMAC */
+ struct phy *ifphy;
+ bool disabled;
+};
+
+struct cpsw_platform_data {
+ struct cpsw_slave_data *slave_data;
+ u32 ss_reg_ofs; /* Subsystem control register offset */
+ u32 channels; /* number of cpdma channels (symmetric) */
+ u32 slaves; /* number of slave cpgmac ports */
+ u32 active_slave;/* time stamping, ethtool and SIOCGMIIPHY slave */
+ u32 bd_ram_size; /* buffer descriptor ram size */
+ u32 mac_control; /* Mac control register */
+ u16 default_vlan; /* Def VLAN for ALE lookup in VLAN aware mode*/
+ bool dual_emac; /* Enable Dual EMAC mode */
+};
+
+struct cpsw_slave {
+ void __iomem *regs;
+ int slave_num;
+ u32 mac_control;
+ struct cpsw_slave_data *data;
+ struct phy_device *phy;
+ struct net_device *ndev;
+ u32 port_vlan;
+ struct cpsw_sl *mac_sl;
+};
+
+static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
+{
+ return readl_relaxed(slave->regs + offset);
+}
+
+static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
+{
+ writel_relaxed(val, slave->regs + offset);
+}
+
+struct cpsw_vector {
+ struct cpdma_chan *ch;
+ int budget;
+};
+
+struct cpsw_common {
+ struct device *dev;
+ struct cpsw_platform_data data;
+ struct napi_struct napi_rx;
+ struct napi_struct napi_tx;
+ struct cpsw_ss_regs __iomem *regs;
+ struct cpsw_wr_regs __iomem *wr_regs;
+ u8 __iomem *hw_stats;
+ struct cpsw_host_regs __iomem *host_port_regs;
+ u32 version;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
+ int rx_packet_max;
+ int descs_pool_size;
+ struct cpsw_slave *slaves;
+ struct cpdma_ctlr *dma;
+ struct cpsw_vector txv[CPSW_MAX_QUEUES];
+ struct cpsw_vector rxv[CPSW_MAX_QUEUES];
+ struct cpsw_ale *ale;
+ bool quirk_irq;
+ bool rx_irq_disabled;
+ bool tx_irq_disabled;
+ u32 irqs_table[IRQ_NUM];
+ int misc_irq;
+ struct cpts *cpts;
+ struct devlink *devlink;
+ int rx_ch_num, tx_ch_num;
+ int speed;
+ int usage_count;
+ struct page_pool *page_pool[CPSW_MAX_QUEUES];
+ u8 br_members;
+ struct net_device *hw_bridge_dev;
+ bool ale_bypass;
+ u8 base_mac[ETH_ALEN];
+};
+
+struct cpsw_ale_ratelimit {
+ unsigned long cookie;
+ u64 rate_packet_ps;
+};
+
+struct cpsw_priv {
+ struct net_device *ndev;
+ struct device *dev;
+ u32 msg_enable;
+ u8 mac_addr[ETH_ALEN];
+ bool rx_pause;
+ bool tx_pause;
+ bool mqprio_hw;
+ int fifo_bw[CPSW_TC_NUM];
+ int shp_cfg_speed;
+ int tx_ts_enabled;
+ int rx_ts_enabled;
+ struct bpf_prog *xdp_prog;
+ struct xdp_rxq_info xdp_rxq[CPSW_MAX_QUEUES];
+ struct xdp_attachment_info xdpi;
+
+ u32 emac_port;
+ struct cpsw_common *cpsw;
+ int offload_fwd_mark;
+ u32 tx_packet_min;
+ struct cpsw_ale_ratelimit ale_bc_ratelimit;
+ struct cpsw_ale_ratelimit ale_mc_ratelimit;
+};
+
+#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
+#define napi_to_cpsw(napi) container_of(napi, struct cpsw_common, napi)
+
+extern int (*cpsw_slave_index)(struct cpsw_common *cpsw,
+ struct cpsw_priv *priv);
+
+struct addr_sync_ctx {
+ struct net_device *ndev;
+ const u8 *addr; /* address to be synched */
+ int consumed; /* number of address instances */
+ int flush; /* flush flag */
+};
+
+#define CPSW_XMETA_OFFSET ALIGN(sizeof(struct xdp_frame), sizeof(long))
+
+#define CPSW_XDP_CONSUMED 1
+#define CPSW_XDP_PASS 0
+
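+/* Per-buffer metadata placed in the buffer headroom right after the space
+ * reserved for an xdp_frame, so completion handlers can recover the
+ * originating net_device and channel.
+ */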
+struct __aligned(sizeof(long)) cpsw_meta_xdp {
+ struct net_device *ndev;
+ int ch;
+};
+
+/* The buf includes headroom compatible with both skb and xdpf */
+#define CPSW_HEADROOM_NA (max(XDP_PACKET_HEADROOM, NET_SKB_PAD) + NET_IP_ALIGN)
+
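+/* Descriptor tokens carry either an skb or an xdp_frame pointer; bit 0 of
+ * the token is used as a tag to tell the two apart, relying on the pointers
+ * being at least two-byte aligned.
+ */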
+static inline int cpsw_is_xdpf_handle(void *handle)
+{
+ return (unsigned long)handle & BIT(0);
+}
+
+static inline void *cpsw_xdpf_to_handle(struct xdp_frame *xdpf)
+{
+ return (void *)((unsigned long)xdpf | BIT(0));
+}
+
+static inline struct xdp_frame *cpsw_handle_to_xdpf(void *handle)
+{
+ return (struct xdp_frame *)((unsigned long)handle & ~BIT(0));
+}
+
+int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
+ int ale_ageout, phys_addr_t desc_mem_phys,
+ int descs_pool_size);
+void cpsw_split_res(struct cpsw_common *cpsw);
+int cpsw_fill_rx_channels(struct cpsw_priv *priv);
+void cpsw_intr_enable(struct cpsw_common *cpsw);
+void cpsw_intr_disable(struct cpsw_common *cpsw);
+void cpsw_tx_handler(void *token, int len, int status);
+int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw);
+void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw);
+int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
+int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
+ struct page *page, int port);
+int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
+ struct page *page, int port, int *len);
+irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id);
+irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id);
+int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_tx_poll(struct napi_struct *napi_tx, int budget);
+int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget);
+int cpsw_rx_poll(struct napi_struct *napi_rx, int budget);
+void cpsw_rx_vlan_encap(struct sk_buff *skb);
+void soft_reset(const char *module, void __iomem *reg);
+void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue);
+int cpsw_need_resplit(struct cpsw_common *cpsw);
+int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd);
+int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate);
+int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
+ void *type_data);
+bool cpsw_shp_is_off(struct cpsw_priv *priv);
+void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv);
+void cpsw_qos_clsflower_resume(struct cpsw_priv *priv);
+
+/* ethtool */
+u32 cpsw_get_msglevel(struct net_device *ndev);
+void cpsw_set_msglevel(struct net_device *ndev, u32 value);
+int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack);
+int cpsw_get_sset_count(struct net_device *ndev, int sset);
+void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data);
+void cpsw_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data);
+void cpsw_get_pauseparam(struct net_device *ndev,
+ struct ethtool_pauseparam *pause);
+void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol);
+int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol);
+int cpsw_get_regs_len(struct net_device *ndev);
+void cpsw_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p);
+int cpsw_ethtool_op_begin(struct net_device *ndev);
+void cpsw_ethtool_op_complete(struct net_device *ndev);
+void cpsw_get_channels(struct net_device *ndev, struct ethtool_channels *ch);
+int cpsw_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *ecmd);
+int cpsw_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *ecmd);
+int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata);
+int cpsw_nway_reset(struct net_device *ndev);
+void cpsw_get_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
+int cpsw_set_ringparam(struct net_device *ndev,
+ struct ethtool_ringparam *ering,
+ struct kernel_ethtool_ringparam *kernel_ering,
+ struct netlink_ext_ack *extack);
+int cpsw_set_channels_common(struct net_device *ndev,
+ struct ethtool_channels *chs,
+ cpdma_handler_fn rx_handler);
+int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */
diff --git a/drivers/net/ethernet/ti/cpsw_sl.c b/drivers/net/ethernet/ti/cpsw_sl.c
new file mode 100644
index 000000000..0c7531cb0
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_sl.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/
+ * Ethernet MAC Sliver (CPGMAC_SL)
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+
+#include "cpsw_sl.h"
+
+#define CPSW_SL_REG_NOTUSED U16_MAX
+
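+/* Per-variant register offset maps: entries set to CPSW_SL_REG_NOTUSED are
+ * not implemented on that CPSW integration and are refused (with an error
+ * message) by cpsw_sl_reg_read()/cpsw_sl_reg_write() below.
+ */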
+static const u16 cpsw_sl_reg_map_cpsw[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = 0x14,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = 0x24,
+ [CPSW_SL_TX_GAP] = 0x28,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2hk[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = 0x24,
+ [CPSW_SL_TX_GAP] = CPSW_SL_REG_NOTUSED,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2x_xgbe[] = {
+ [CPSW_SL_IDVER] = 0x00,
+ [CPSW_SL_MACCONTROL] = 0x04,
+ [CPSW_SL_MACSTATUS] = 0x08,
+ [CPSW_SL_SOFT_RESET] = 0x0c,
+ [CPSW_SL_RX_MAXLEN] = 0x10,
+ [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_RX_PAUSE] = 0x18,
+ [CPSW_SL_TX_PAUSE] = 0x1c,
+ [CPSW_SL_EMCONTROL] = 0x20,
+ [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_TX_GAP] = 0x28,
+};
+
+static const u16 cpsw_sl_reg_map_66ak2elg_am65[] = {
+ [CPSW_SL_IDVER] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_MACCONTROL] = 0x00,
+ [CPSW_SL_MACSTATUS] = 0x04,
+ [CPSW_SL_SOFT_RESET] = 0x08,
+ [CPSW_SL_RX_MAXLEN] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_BOFFTEST] = 0x0c,
+ [CPSW_SL_RX_PAUSE] = 0x10,
+ [CPSW_SL_TX_PAUSE] = 0x40,
+ [CPSW_SL_EMCONTROL] = 0x70,
+ [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED,
+ [CPSW_SL_TX_GAP] = 0x74,
+};
+
+#define CPSW_SL_SOFT_RESET_BIT BIT(0)
+
+#define CPSW_SL_STATUS_PN_IDLE BIT(31)
+#define CPSW_SL_AM65_STATUS_PN_E_IDLE BIT(30)
+#define CPSW_SL_AM65_STATUS_PN_P_IDLE BIT(29)
+#define CPSW_SL_AM65_STATUS_PN_TX_IDLE BIT(28)
+
+#define CPSW_SL_STATUS_IDLE_MASK_BASE (CPSW_SL_STATUS_PN_IDLE)
+
+#define CPSW_SL_STATUS_IDLE_MASK_K3 \
+ (CPSW_SL_STATUS_IDLE_MASK_BASE | CPSW_SL_AM65_STATUS_PN_E_IDLE | \
+ CPSW_SL_AM65_STATUS_PN_P_IDLE | CPSW_SL_AM65_STATUS_PN_TX_IDLE)
+
+#define CPSW_SL_CTL_FUNC_BASE \
+ (CPSW_SL_CTL_FULLDUPLEX |\
+ CPSW_SL_CTL_LOOPBACK |\
+ CPSW_SL_CTL_RX_FLOW_EN |\
+ CPSW_SL_CTL_TX_FLOW_EN |\
+ CPSW_SL_CTL_GMII_EN |\
+ CPSW_SL_CTL_TX_PACE |\
+ CPSW_SL_CTL_GIG |\
+ CPSW_SL_CTL_CMD_IDLE |\
+ CPSW_SL_CTL_IFCTL_A |\
+ CPSW_SL_CTL_IFCTL_B |\
+ CPSW_SL_CTL_GIG_FORCE |\
+ CPSW_SL_CTL_EXT_EN |\
+ CPSW_SL_CTL_RX_CEF_EN |\
+ CPSW_SL_CTL_RX_CSF_EN |\
+ CPSW_SL_CTL_RX_CMF_EN)
+
+struct cpsw_sl {
+ struct device *dev;
+ void __iomem *sl_base;
+ const u16 *regs;
+ u32 control_features;
+ u32 idle_mask;
+};
+
+struct cpsw_sl_dev_id {
+ const char *device_id;
+ const u16 *regs;
+ const u32 control_features;
+ const u32 regs_offset;
+ const u32 idle_mask;
+};
+
+static const struct cpsw_sl_dev_id cpsw_sl_id_match[] = {
+ {
+ .device_id = "cpsw",
+ .regs = cpsw_sl_reg_map_cpsw,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_TX_SG_LIM_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2hk",
+ .regs = cpsw_sl_reg_map_66ak2hk,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2x_xgbe",
+ .regs = cpsw_sl_reg_map_66ak2x_xgbe,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_XGIG |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_XGMII_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2el",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO |
+ CPSW_SL_CTL_TX_SG_LIM_EN,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE,
+ },
+ {
+ .device_id = "66ak2g",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO,
+ },
+ {
+ .device_id = "am65",
+ .regs = cpsw_sl_reg_map_66ak2elg_am65,
+ .regs_offset = 0x330,
+ .control_features = CPSW_SL_CTL_FUNC_BASE |
+ CPSW_SL_CTL_MTEST |
+ CPSW_SL_CTL_XGIG |
+ CPSW_SL_CTL_TX_SHORT_GAP_EN |
+ CPSW_SL_CTL_CRC_TYPE |
+ CPSW_SL_CTL_XGMII_EN |
+ CPSW_SL_CTL_EXT_EN_RX_FLO |
+ CPSW_SL_CTL_EXT_EN_TX_FLO |
+ CPSW_SL_CTL_TX_SG_LIM_EN |
+ CPSW_SL_CTL_EXT_EN_XGIG,
+ .idle_mask = CPSW_SL_STATUS_IDLE_MASK_K3,
+ },
+ { },
+};
+
+u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg)
+{
+ int val;
+
+ if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) {
+ dev_err(sl->dev, "cpsw_sl: not sup r reg: %04X\n",
+ sl->regs[reg]);
+ return 0;
+ }
+
+ val = readl(sl->sl_base + sl->regs[reg]);
+ dev_dbg(sl->dev, "cpsw_sl: reg: %04X r 0x%08X\n", sl->regs[reg], val);
+ return val;
+}
+
+void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val)
+{
+ if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) {
+ dev_err(sl->dev, "cpsw_sl: not sup w reg: %04X\n",
+ sl->regs[reg]);
+ return;
+ }
+
+ dev_dbg(sl->dev, "cpsw_sl: reg: %04X w 0x%08X\n", sl->regs[reg], val);
+ writel(val, sl->sl_base + sl->regs[reg]);
+}
+
+static const struct cpsw_sl_dev_id *cpsw_sl_match_id(
+ const struct cpsw_sl_dev_id *id,
+ const char *device_id)
+{
+ if (!id || !device_id)
+ return NULL;
+
+ while (id->device_id) {
+ if (strcmp(device_id, id->device_id) == 0)
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev,
+ void __iomem *sl_base)
+{
+ const struct cpsw_sl_dev_id *sl_dev_id;
+ struct cpsw_sl *sl;
+
+ sl = devm_kzalloc(dev, sizeof(struct cpsw_sl), GFP_KERNEL);
+ if (!sl)
+ return ERR_PTR(-ENOMEM);
+ sl->dev = dev;
+ sl->sl_base = sl_base;
+
+ sl_dev_id = cpsw_sl_match_id(cpsw_sl_id_match, device_id);
+ if (!sl_dev_id) {
+ dev_err(sl->dev, "cpsw_sl: dev_id %s not found.\n", device_id);
+ return ERR_PTR(-EINVAL);
+ }
+ sl->regs = sl_dev_id->regs;
+ sl->control_features = sl_dev_id->control_features;
+ sl->idle_mask = sl_dev_id->idle_mask;
+ sl->sl_base += sl_dev_id->regs_offset;
+
+ return sl;
+}
+
+void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(tmo);
+
+ /* Set the soft reset bit */
+ cpsw_sl_reg_write(sl, CPSW_SL_SOFT_RESET, CPSW_SL_SOFT_RESET_BIT);
+
+ /* Wait for the bit to clear */
+ do {
+ usleep_range(100, 200);
+ } while ((cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) &
+ CPSW_SL_SOFT_RESET_BIT) &&
+ time_after(timeout, jiffies));
+
+ if (cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & CPSW_SL_SOFT_RESET_BIT)
+ dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n");
+}
+
+u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs)
+{
+ u32 val;
+
+ if (ctl_funcs & ~sl->control_features) {
+ dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n",
+ ctl_funcs & (~sl->control_features));
+ return -EINVAL;
+ }
+
+ val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL);
+ val |= ctl_funcs;
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val);
+
+ return 0;
+}
+
+u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs)
+{
+ u32 val;
+
+ if (ctl_funcs & ~sl->control_features) {
+ dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n",
+ ctl_funcs & (~sl->control_features));
+ return -EINVAL;
+ }
+
+ val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL);
+ val &= ~ctl_funcs;
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val);
+
+ return 0;
+}
+
+void cpsw_sl_ctl_reset(struct cpsw_sl *sl)
+{
+ cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, 0);
+}
+
+int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(tmo);
+
+ do {
+ usleep_range(100, 200);
+ } while (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) &
+ sl->idle_mask) && time_after(timeout, jiffies));
+
+ if (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & sl->idle_mask)) {
+		dev_err(sl->dev, "cpsw_sl: timed out waiting for idle\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/cpsw_sl.h b/drivers/net/ethernet/ti/cpsw_sl.h
new file mode 100644
index 000000000..a6d06a5a4
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_sl.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/
+ * Ethernet MAC Sliver (CPGMAC_SL) APIs
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#ifndef __TI_CPSW_SL_H__
+#define __TI_CPSW_SL_H__
+
+#include <linux/device.h>
+
+enum cpsw_sl_regs {
+ CPSW_SL_IDVER,
+ CPSW_SL_MACCONTROL,
+ CPSW_SL_MACSTATUS,
+ CPSW_SL_SOFT_RESET,
+ CPSW_SL_RX_MAXLEN,
+ CPSW_SL_BOFFTEST,
+ CPSW_SL_RX_PAUSE,
+ CPSW_SL_TX_PAUSE,
+ CPSW_SL_EMCONTROL,
+ CPSW_SL_RX_PRI_MAP,
+ CPSW_SL_TX_GAP,
+};
+
+enum {
+ CPSW_SL_CTL_FULLDUPLEX = BIT(0), /* Full Duplex mode */
+ CPSW_SL_CTL_LOOPBACK = BIT(1), /* Loop Back Mode */
+ CPSW_SL_CTL_MTEST = BIT(2), /* Manufacturing Test mode */
+ CPSW_SL_CTL_RX_FLOW_EN = BIT(3), /* Receive Flow Control Enable */
+ CPSW_SL_CTL_TX_FLOW_EN = BIT(4), /* Transmit Flow Control Enable */
+ CPSW_SL_CTL_GMII_EN = BIT(5), /* GMII Enable */
+ CPSW_SL_CTL_TX_PACE = BIT(6), /* Transmit Pacing Enable */
+ CPSW_SL_CTL_GIG = BIT(7), /* Gigabit Mode */
+ CPSW_SL_CTL_XGIG = BIT(8), /* 10 Gigabit Mode */
+ CPSW_SL_CTL_TX_SHORT_GAP_EN = BIT(10), /* Transmit Short Gap Enable */
+ CPSW_SL_CTL_CMD_IDLE = BIT(11), /* Command Idle */
+ CPSW_SL_CTL_CRC_TYPE = BIT(12), /* Port CRC Type */
+ CPSW_SL_CTL_XGMII_EN = BIT(13), /* XGMII Enable */
+ CPSW_SL_CTL_IFCTL_A = BIT(15), /* Interface Control A */
+ CPSW_SL_CTL_IFCTL_B = BIT(16), /* Interface Control B */
+ CPSW_SL_CTL_GIG_FORCE = BIT(17), /* Gigabit Mode Force */
+ CPSW_SL_CTL_EXT_EN = BIT(18), /* External Control Enable */
+ CPSW_SL_CTL_EXT_EN_RX_FLO = BIT(19), /* Ext RX Flow Control Enable */
+ CPSW_SL_CTL_EXT_EN_TX_FLO = BIT(20), /* Ext TX Flow Control Enable */
+	CPSW_SL_CTL_TX_SG_LIM_EN = BIT(21), /* TX Short Gap Limit Enable */
+ CPSW_SL_CTL_RX_CEF_EN = BIT(22), /* RX Copy Error Frames Enable */
+ CPSW_SL_CTL_RX_CSF_EN = BIT(23), /* RX Copy Short Frames Enable */
+ CPSW_SL_CTL_RX_CMF_EN = BIT(24), /* RX Copy MAC Control Frames Enable */
+ CPSW_SL_CTL_EXT_EN_XGIG = BIT(25), /* Ext XGIG Control En, k3 only */
+
+ CPSW_SL_CTL_FUNCS_COUNT
+};
+
+struct cpsw_sl;
+
+struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev,
+ void __iomem *sl_base);
+
+void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo);
+
+u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs);
+u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs);
+void cpsw_sl_ctl_reset(struct cpsw_sl *sl);
+int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo);
+
+u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg);
+void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val);
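+
+/* Illustrative usage sketch only (not part of the driver, error handling
+ * omitted): a port driver typically obtains and programs a sliver roughly as
+ * follows; the device_id string and the sliver base address depend on the
+ * SoC integration:
+ *
+ *	sl = cpsw_sl_get("cpsw", dev, sliver_base);
+ *	cpsw_sl_reset(sl, 100);
+ *	cpsw_sl_ctl_reset(sl);
+ *	cpsw_sl_ctl_set(sl, CPSW_SL_CTL_GMII_EN | CPSW_SL_CTL_FULLDUPLEX);
+ *	cpsw_sl_reg_write(sl, CPSW_SL_RX_MAXLEN, rx_max_len);
+ */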
+
+#endif /* __TI_CPSW_SL_H__ */
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.c b/drivers/net/ethernet/ti/cpsw_switchdev.c
new file mode 100644
index 000000000..ce85f7610
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.c
@@ -0,0 +1,544 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments switchdev Driver
+ *
+ * Copyright (C) 2019 Texas Instruments
+ *
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/if_bridge.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <net/switchdev.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "cpsw_priv.h"
+#include "cpsw_switchdev.h"
+
+struct cpsw_switchdev_event_work {
+ struct work_struct work;
+ struct switchdev_notifier_fdb_info fdb_info;
+ struct cpsw_priv *priv;
+ unsigned long event;
+};
+
+static int cpsw_port_stp_state_set(struct cpsw_priv *priv, u8 state)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u8 cpsw_state;
+ int ret = 0;
+
+ switch (state) {
+ case BR_STATE_FORWARDING:
+ cpsw_state = ALE_PORT_STATE_FORWARD;
+ break;
+ case BR_STATE_LEARNING:
+ cpsw_state = ALE_PORT_STATE_LEARN;
+ break;
+ case BR_STATE_DISABLED:
+ cpsw_state = ALE_PORT_STATE_DISABLE;
+ break;
+ case BR_STATE_LISTENING:
+ case BR_STATE_BLOCKING:
+ cpsw_state = ALE_PORT_STATE_BLOCK;
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ ret = cpsw_ale_control_set(cpsw->ale, priv->emac_port,
+ ALE_PORT_STATE, cpsw_state);
+ dev_dbg(priv->dev, "ale state: %u\n", cpsw_state);
+
+ return ret;
+}
+
+static int cpsw_port_attr_br_flags_set(struct cpsw_priv *priv,
+ struct net_device *orig_dev,
+ struct switchdev_brport_flags flags)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+
+ if (flags.mask & BR_MCAST_FLOOD) {
+ bool unreg_mcast_add = false;
+
+ if (flags.val & BR_MCAST_FLOOD)
+ unreg_mcast_add = true;
+
+ dev_dbg(priv->dev, "BR_MCAST_FLOOD: %d port %u\n",
+ unreg_mcast_add, priv->emac_port);
+
+ cpsw_ale_set_unreg_mcast(cpsw->ale, BIT(priv->emac_port),
+ unreg_mcast_add);
+ }
+
+ return 0;
+}
+
+static int cpsw_port_attr_br_flags_pre_set(struct net_device *netdev,
+ struct switchdev_brport_flags flags)
+{
+ if (flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int cpsw_port_attr_set(struct net_device *ndev, const void *ctx,
+ const struct switchdev_attr *attr,
+ struct netlink_ext_ack *extack)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int ret;
+
+ dev_dbg(priv->dev, "attr: id %u port: %u\n", attr->id, priv->emac_port);
+
+ switch (attr->id) {
+ case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_pre_set(ndev,
+ attr->u.brport_flags);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_STP_STATE:
+ ret = cpsw_port_stp_state_set(priv, attr->u.stp_state);
+ dev_dbg(priv->dev, "stp state: %u\n", attr->u.stp_state);
+ break;
+ case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS:
+ ret = cpsw_port_attr_br_flags_set(priv, attr->orig_dev,
+ attr->u.brport_flags);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
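+/* Port VLAN register layout used by the PVID helpers below: VID in
+ * bits [11:0], CFI in bit 12, priority/CoS in bits [15:13].
+ */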
+static u16 cpsw_get_pvid(struct cpsw_priv *priv)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ u32 __iomem *port_vlan_reg;
+ u32 pvid;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ pvid = slave_read(cpsw->slaves + (priv->emac_port - 1), reg);
+ } else {
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ pvid = readl(port_vlan_reg);
+ }
+
+ pvid = pvid & 0xfff;
+
+ return pvid;
+}
+
+static void cpsw_set_pvid(struct cpsw_priv *priv, u16 vid, bool cfi, u32 cos)
+{
+ struct cpsw_common *cpsw = priv->cpsw;
+ void __iomem *port_vlan_reg;
+ u32 pvid;
+
+ pvid = vid;
+ pvid |= cfi ? BIT(12) : 0;
+ pvid |= (cos & 0x7) << 13;
+
+ if (priv->emac_port) {
+ int reg = CPSW2_PORT_VLAN;
+
+ if (cpsw->version == CPSW_VERSION_1)
+ reg = CPSW1_PORT_VLAN;
+ /* no barrier */
+ slave_write(cpsw->slaves + (priv->emac_port - 1), pvid, reg);
+ } else {
+ /* CPU port */
+ port_vlan_reg = &cpsw->host_port_regs->port_vlan;
+ writel(pvid, port_vlan_reg);
+ }
+}
+
+static int cpsw_port_vlan_add(struct cpsw_priv *priv, bool untag, bool pvid,
+ u16 vid, struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int unreg_mcast_mask = 0;
+ int reg_mcast_mask = 0;
+ int untag_mask = 0;
+ int port_mask;
+ int ret = 0;
+ u32 flags;
+
+ if (cpu_port) {
+ port_mask = BIT(HOST_PORT_NUM);
+ flags = orig_dev->flags;
+	/* Drop all packets */
+ } else {
+ port_mask = BIT(priv->emac_port);
+ flags = priv->ndev->flags;
+ }
+
+ if (flags & IFF_MULTICAST)
+ reg_mcast_mask = port_mask;
+
+ if (untag)
+ untag_mask = port_mask;
+
+ ret = cpsw_ale_vlan_add_modify(cpsw->ale, vid, port_mask, untag_mask,
+ reg_mcast_mask, unreg_mcast_mask);
+ if (ret) {
+ dev_err(priv->dev, "Unable to add vlan\n");
+ return ret;
+ }
+
+ if (cpu_port)
+ cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+ if (!pvid)
+ return ret;
+
+ cpsw_set_pvid(priv, vid, 0, 0);
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+ return ret;
+}
+
+static int cpsw_port_vlan_del(struct cpsw_priv *priv, u16 vid,
+ struct net_device *orig_dev)
+{
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int ret = 0;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ ret = cpsw_ale_vlan_del_modify(cpsw->ale, vid, port_mask);
+ if (ret != 0)
+ return ret;
+
+	/* We don't care about the return value here; an error is returned
+	 * only if the unicast entry is not present.
+	 */
+ if (cpu_port)
+ cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+ HOST_PORT_NUM, ALE_VLAN, vid);
+
+ if (vid == cpsw_get_pvid(priv))
+ cpsw_set_pvid(priv, 0, 0, 0);
+
+	/* We don't care about the return value here; an error is returned
+	 * only if the multicast entry is not present.
+	 */
+ cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+ port_mask, ALE_VLAN, vid);
+ dev_dbg(priv->dev, "VID del: %s: vid:%u ports:%X\n",
+ priv->ndev->name, vid, port_mask);
+
+ return ret;
+}
+
+static int cpsw_port_vlans_add(struct cpsw_priv *priv,
+ const struct switchdev_obj_port_vlan *vlan)
+{
+ bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED;
+ struct net_device *orig_dev = vlan->obj.orig_dev;
+ bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID;
+
+ dev_dbg(priv->dev, "VID add: %s: vid:%u flags:%X\n",
+ priv->ndev->name, vlan->vid, vlan->flags);
+
+ return cpsw_port_vlan_add(priv, untag, pvid, vlan->vid, orig_dev);
+}
+
+static int cpsw_port_mdb_add(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port_mask;
+ int err;
+
+ if (cpu_port)
+ port_mask = BIT(HOST_PORT_NUM);
+ else
+ port_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_add_mcast(cpsw->ale, mdb->addr, port_mask,
+ ALE_VLAN, mdb->vid, 0);
+ dev_dbg(priv->dev, "MDB add: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, port_mask);
+
+ return err;
+}
+
+static int cpsw_port_mdb_del(struct cpsw_priv *priv,
+ struct switchdev_obj_port_mdb *mdb)
+
+{
+ struct net_device *orig_dev = mdb->obj.orig_dev;
+ bool cpu_port = netif_is_bridge_master(orig_dev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ int del_mask;
+ int err;
+
+ if (cpu_port)
+ del_mask = BIT(HOST_PORT_NUM);
+ else
+ del_mask = BIT(priv->emac_port);
+
+ err = cpsw_ale_del_mcast(cpsw->ale, mdb->addr, del_mask,
+ ALE_VLAN, mdb->vid);
+ dev_dbg(priv->dev, "MDB del: %s: vid %u:%pM ports: %X\n",
+ priv->ndev->name, mdb->vid, mdb->addr, del_mask);
+
+ return err;
+}
+
+static int cpsw_port_obj_add(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj,
+ struct netlink_ext_ack *extack)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_add: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlans_add(priv, vlan);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_add(priv, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static int cpsw_port_obj_del(struct net_device *ndev, const void *ctx,
+ const struct switchdev_obj *obj)
+{
+ struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj);
+ struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj);
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err = 0;
+
+ dev_dbg(priv->dev, "obj_del: id %u port: %u\n",
+ obj->id, priv->emac_port);
+
+ switch (obj->id) {
+ case SWITCHDEV_OBJ_ID_PORT_VLAN:
+ err = cpsw_port_vlan_del(priv, vlan->vid, vlan->obj.orig_dev);
+ break;
+ case SWITCHDEV_OBJ_ID_PORT_MDB:
+ case SWITCHDEV_OBJ_ID_HOST_MDB:
+ err = cpsw_port_mdb_del(priv, mdb);
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+static void cpsw_fdb_offload_notify(struct net_device *ndev,
+ struct switchdev_notifier_fdb_info *rcv)
+{
+ struct switchdev_notifier_fdb_info info = {};
+
+ info.addr = rcv->addr;
+ info.vid = rcv->vid;
+ info.offloaded = true;
+ call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED,
+ ndev, &info.info, NULL);
+}
+
+static void cpsw_switchdev_event_work(struct work_struct *work)
+{
+ struct cpsw_switchdev_event_work *switchdev_work =
+ container_of(work, struct cpsw_switchdev_event_work, work);
+ struct cpsw_priv *priv = switchdev_work->priv;
+ struct switchdev_notifier_fdb_info *fdb;
+ struct cpsw_common *cpsw = priv->cpsw;
+ int port = priv->emac_port;
+
+ rtnl_lock();
+ switch (switchdev_work->event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user || fdb->is_local)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_add_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ cpsw_fdb_offload_notify(priv->ndev, fdb);
+ break;
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ fdb = &switchdev_work->fdb_info;
+
+ dev_dbg(cpsw->dev, "cpsw_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n",
+ fdb->addr, fdb->vid, fdb->added_by_user,
+ fdb->offloaded, port);
+
+ if (!fdb->added_by_user || fdb->is_local)
+ break;
+ if (memcmp(priv->mac_addr, (u8 *)fdb->addr, ETH_ALEN) == 0)
+ port = HOST_PORT_NUM;
+
+ cpsw_ale_del_ucast(cpsw->ale, (u8 *)fdb->addr, port,
+ fdb->vid ? ALE_VLAN : 0, fdb->vid);
+ break;
+ default:
+ break;
+ }
+ rtnl_unlock();
+
+ kfree(switchdev_work->fdb_info.addr);
+ kfree(switchdev_work);
+ dev_put(priv->ndev);
+}
+
+/* called under rcu_read_lock() */
+static int cpsw_switchdev_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *ndev = switchdev_notifier_info_to_dev(ptr);
+ struct switchdev_notifier_fdb_info *fdb_info = ptr;
+ struct cpsw_switchdev_event_work *switchdev_work;
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ int err;
+
+ if (event == SWITCHDEV_PORT_ATTR_SET) {
+ err = switchdev_handle_port_attr_set(ndev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ }
+
+ if (!cpsw_port_dev_check(ndev))
+ return NOTIFY_DONE;
+
+ switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC);
+ if (WARN_ON(!switchdev_work))
+ return NOTIFY_BAD;
+
+ INIT_WORK(&switchdev_work->work, cpsw_switchdev_event_work);
+ switchdev_work->priv = priv;
+ switchdev_work->event = event;
+
+ switch (event) {
+ case SWITCHDEV_FDB_ADD_TO_DEVICE:
+ case SWITCHDEV_FDB_DEL_TO_DEVICE:
+ memcpy(&switchdev_work->fdb_info, ptr,
+ sizeof(switchdev_work->fdb_info));
+ switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC);
+ if (!switchdev_work->fdb_info.addr)
+ goto err_addr_alloc;
+ ether_addr_copy((u8 *)switchdev_work->fdb_info.addr,
+ fdb_info->addr);
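+		/* Keep the netdev alive until the deferred work has run;
+		 * it is released via dev_put() in cpsw_switchdev_event_work().
+		 */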
+ dev_hold(ndev);
+ break;
+ default:
+ kfree(switchdev_work);
+ return NOTIFY_DONE;
+ }
+
+ queue_work(system_long_wq, &switchdev_work->work);
+
+ return NOTIFY_DONE;
+
+err_addr_alloc:
+ kfree(switchdev_work);
+ return NOTIFY_BAD;
+}
+
+static struct notifier_block cpsw_switchdev_notifier = {
+ .notifier_call = cpsw_switchdev_event,
+};
+
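+/* FDB add/del events above arrive in atomic (rcu) context and are deferred
+ * to a workqueue; the blocking notifier chain below handles VLAN/MDB object
+ * add/del and attribute set directly, as those handlers may sleep.
+ */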
+static int cpsw_switchdev_blocking_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = switchdev_notifier_info_to_dev(ptr);
+ int err;
+
+ switch (event) {
+ case SWITCHDEV_PORT_OBJ_ADD:
+ err = switchdev_handle_port_obj_add(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_add);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_OBJ_DEL:
+ err = switchdev_handle_port_obj_del(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_obj_del);
+ return notifier_from_errno(err);
+ case SWITCHDEV_PORT_ATTR_SET:
+ err = switchdev_handle_port_attr_set(dev, ptr,
+ cpsw_port_dev_check,
+ cpsw_port_attr_set);
+ return notifier_from_errno(err);
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpsw_switchdev_bl_notifier = {
+ .notifier_call = cpsw_switchdev_blocking_event,
+};
+
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw)
+{
+ int ret = 0;
+
+ ret = register_switchdev_notifier(&cpsw_switchdev_notifier);
+ if (ret) {
+ dev_err(cpsw->dev, "register switchdev notifier fail ret:%d\n",
+ ret);
+ return ret;
+ }
+
+ ret = register_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ if (ret) {
+		dev_err(cpsw->dev, "register switchdev blocking notifier fail ret:%d\n",
+			ret);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+ }
+
+ return ret;
+}
+
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw)
+{
+ unregister_switchdev_blocking_notifier(&cpsw_switchdev_bl_notifier);
+ unregister_switchdev_notifier(&cpsw_switchdev_notifier);
+}
diff --git a/drivers/net/ethernet/ti/cpsw_switchdev.h b/drivers/net/ethernet/ti/cpsw_switchdev.h
new file mode 100644
index 000000000..04a045dba
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpsw_switchdev.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments Ethernet Switch Driver
+ */
+
+#ifndef DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+#define DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_
+
+#include <net/switchdev.h>
+
+bool cpsw_port_dev_check(const struct net_device *dev);
+int cpsw_switchdev_register_notifiers(struct cpsw_common *cpsw);
+void cpsw_switchdev_unregister_notifiers(struct cpsw_common *cpsw);
+
+#endif /* DRIVERS_NET_ETHERNET_TI_CPSW_SWITCHDEV_H_ */
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
new file mode 100644
index 000000000..92ca739fa
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -0,0 +1,819 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ */
+#include <linux/clk-provider.h>
+#include <linux/err.h>
+#include <linux/if.h>
+#include <linux/hrtimer.h>
+#include <linux/module.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_classify.h>
+#include <linux/time.h>
+#include <linux/uaccess.h>
+#include <linux/workqueue.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include "cpts.h"
+
+#define CPTS_SKB_TX_WORK_TIMEOUT 1 /* jiffies */
+#define CPTS_SKB_RX_TX_TMO 100 /* ms */
+#define CPTS_EVENT_RX_TX_TIMEOUT (100) /* ms */
+
+struct cpts_skb_cb_data {
+ u32 skb_mtype_seqid;
+ unsigned long tmo;
+};
+
+#define cpts_read32(c, r) readl_relaxed(&c->reg->r)
+#define cpts_write32(c, v, r) writel_relaxed(v, &c->reg->r)
+
+static int cpts_event_port(struct cpts_event *event)
+{
+ return (event->high >> PORT_NUMBER_SHIFT) & PORT_NUMBER_MASK;
+}
+
+static int event_expired(struct cpts_event *event)
+{
+ return time_after(jiffies, event->tmo);
+}
+
+static int event_type(struct cpts_event *event)
+{
+ return (event->high >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+}
+
+static int cpts_fifo_pop(struct cpts *cpts, u32 *high, u32 *low)
+{
+ u32 r = cpts_read32(cpts, intstat_raw);
+
+ if (r & TS_PEND_RAW) {
+ *high = cpts_read32(cpts, event_high);
+ *low = cpts_read32(cpts, event_low);
+ cpts_write32(cpts, EVENT_POP, event_pop);
+ return 0;
+ }
+ return -1;
+}
+
+static int cpts_purge_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ int removed = 0;
+
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_expired(event)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "cpts: event pool cleaned up %d\n", removed);
+ return removed ? 0 : -1;
+}
+
+static void cpts_purge_txq(struct cpts *cpts)
+{
+ struct cpts_skb_cb_data *skb_cb;
+ struct sk_buff *skb, *tmp;
+ int removed = 0;
+
+ skb_queue_walk_safe(&cpts->txq, skb, tmp) {
+ skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ if (time_after(jiffies, skb_cb->tmo)) {
+ __skb_unlink(skb, &cpts->txq);
+ dev_consume_skb_any(skb);
+ ++removed;
+ }
+ }
+
+ if (removed)
+ dev_dbg(cpts->dev, "txq cleaned up %d\n", removed);
+}
+
+/*
+ * Returns zero if a matching event type was found.
+ */
+static int cpts_fifo_read(struct cpts *cpts, int match)
+{
+ struct ptp_clock_event pevent;
+ bool need_schedule = false;
+ struct cpts_event *event;
+ unsigned long flags;
+ int i, type = -1;
+ u32 hi, lo;
+
+ spin_lock_irqsave(&cpts->lock, flags);
+
+ for (i = 0; i < CPTS_FIFO_DEPTH; i++) {
+ if (cpts_fifo_pop(cpts, &hi, &lo))
+ break;
+
+ if (list_empty(&cpts->pool) && cpts_purge_events(cpts)) {
+ dev_warn(cpts->dev, "cpts: event pool empty\n");
+ break;
+ }
+
+ event = list_first_entry(&cpts->pool, struct cpts_event, list);
+ event->high = hi;
+ event->low = lo;
+ event->timestamp = timecounter_cyc2time(&cpts->tc, event->low);
+ type = event_type(event);
+
+ dev_dbg(cpts->dev, "CPTS_EV: %d high:%08X low:%08x\n",
+ type, event->high, event->low);
+ switch (type) {
+ case CPTS_EV_PUSH:
+ WRITE_ONCE(cpts->cur_timestamp, lo);
+ timecounter_read(&cpts->tc);
+ if (cpts->mult_new) {
+ cpts->cc.mult = cpts->mult_new;
+ cpts->mult_new = 0;
+ }
+ if (!cpts->irq_poll)
+ complete(&cpts->ts_push_complete);
+ break;
+ case CPTS_EV_TX:
+ case CPTS_EV_RX:
+ event->tmo = jiffies +
+ msecs_to_jiffies(CPTS_EVENT_RX_TX_TIMEOUT);
+
+ list_del_init(&event->list);
+ list_add_tail(&event->list, &cpts->events);
+ need_schedule = true;
+ break;
+ case CPTS_EV_ROLL:
+ case CPTS_EV_HALF:
+ break;
+ case CPTS_EV_HW:
+ pevent.timestamp = event->timestamp;
+ pevent.type = PTP_CLOCK_EXTTS;
+ pevent.index = cpts_event_port(event) - 1;
+ ptp_clock_event(cpts->clock, &pevent);
+ break;
+ default:
+ dev_err(cpts->dev, "cpts: unknown event type\n");
+ break;
+ }
+ if (type == match)
+ break;
+ }
+
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (!cpts->irq_poll && need_schedule)
+ ptp_schedule_worker(cpts->clock, 0);
+
+ return type == match ? 0 : -1;
+}
+
+void cpts_misc_interrupt(struct cpts *cpts)
+{
+ cpts_fifo_read(cpts, -1);
+}
+EXPORT_SYMBOL_GPL(cpts_misc_interrupt);
+
+static u64 cpts_systim_read(const struct cyclecounter *cc)
+{
+ struct cpts *cpts = container_of(cc, struct cpts, cc);
+
+ return READ_ONCE(cpts->cur_timestamp);
+}
+
+static void cpts_update_cur_time(struct cpts *cpts, int match,
+ struct ptp_system_timestamp *sts)
+{
+ unsigned long flags;
+
+ reinit_completion(&cpts->ts_push_complete);
+
+ /* use spin_lock_irqsave() here as it has to run very fast */
+ spin_lock_irqsave(&cpts->lock, flags);
+ ptp_read_system_prets(sts);
+ cpts_write32(cpts, TS_PUSH, ts_push);
+ cpts_read32(cpts, ts_push);
+ ptp_read_system_postts(sts);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ if (cpts->irq_poll && cpts_fifo_read(cpts, match) && match != -1)
+ dev_err(cpts->dev, "cpts: unable to obtain a time stamp\n");
+
+ if (!cpts->irq_poll &&
+ !wait_for_completion_timeout(&cpts->ts_push_complete, HZ))
+ dev_err(cpts->dev, "cpts: obtain a time stamp timeout\n");
+}
+
+/* PTP clock operations */
+
+static int cpts_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ int neg_adj = 0;
+ u32 diff, mult;
+ u64 adj;
+
+ if (ppb < 0) {
+ neg_adj = 1;
+ ppb = -ppb;
+ }
+ mult = cpts->cc_mult;
+ adj = mult;
+ adj *= ppb;
+ diff = div_u64(adj, 1000000000ULL);
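+	/* e.g. (illustrative numbers): ppb = 100 with cc_mult = 0x80000000
+	 * gives diff ~= 214, i.e. a 100 parts-per-billion correction.
+	 */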
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts->mult_new = neg_adj ? mult - diff : mult + diff;
+
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, NULL);
+
+ mutex_unlock(&cpts->ptp_clk_mutex);
+ return 0;
+}
+
+static int cpts_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+ timecounter_adjtime(&cpts->tc, delta);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int cpts_ptp_gettimeex(struct ptp_clock_info *ptp,
+ struct timespec64 *ts,
+ struct ptp_system_timestamp *sts)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, CPTS_EV_PUSH, sts);
+
+ ns = timecounter_read(&cpts->tc);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int cpts_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ u64 ns;
+
+ ns = timespec64_to_ns(ts);
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+ timecounter_init(&cpts->tc, &cpts->cc, ns);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int cpts_extts_enable(struct cpts *cpts, u32 index, int on)
+{
+ u32 v;
+
+ if (((cpts->hw_ts_enable & BIT(index)) >> index) == on)
+ return 0;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ v = cpts_read32(cpts, control);
+ if (on) {
+ v |= BIT(8 + index);
+ cpts->hw_ts_enable |= BIT(index);
+ } else {
+ v &= ~BIT(8 + index);
+ cpts->hw_ts_enable &= ~BIT(index);
+ }
+ cpts_write32(cpts, v, control);
+
+ mutex_unlock(&cpts->ptp_clk_mutex);
+
+ return 0;
+}
+
+static int cpts_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return cpts_extts_enable(cpts, rq->extts.index, on);
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event)
+{
+ struct sk_buff_head txq_list;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ bool found = false;
+ u32 mtype_seqid;
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ __skb_queue_head_init(&txq_list);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice_init(&cpts->txq, &txq_list);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ skb_queue_walk_safe(&txq_list, skb, tmp) {
+ struct skb_shared_hwtstamps ssh;
+ struct cpts_skb_cb_data *skb_cb =
+ (struct cpts_skb_cb_data *)skb->cb;
+
+ if (mtype_seqid == skb_cb->skb_mtype_seqid) {
+ memset(&ssh, 0, sizeof(ssh));
+ ssh.hwtstamp = ns_to_ktime(event->timestamp);
+ skb_tstamp_tx(skb, &ssh);
+ found = true;
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ dev_dbg(cpts->dev, "match tx timestamp mtype_seqid %08x\n",
+ mtype_seqid);
+ break;
+ }
+
+ if (time_after(jiffies, skb_cb->tmo)) {
+			/* drop skbs whose timestamp wait has timed out */
+ dev_dbg(cpts->dev, "expiring tx timestamp from txq\n");
+ __skb_unlink(skb, &txq_list);
+ dev_consume_skb_any(skb);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ skb_queue_splice(&txq_list, &cpts->txq);
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ return found;
+}
+
+static void cpts_process_events(struct cpts *cpts)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ LIST_HEAD(events_free);
+ unsigned long flags;
+ LIST_HEAD(events);
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_init(&cpts->events, &events);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ list_for_each_safe(this, next, &events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (cpts_match_tx_ts(cpts, event) ||
+ time_after(jiffies, event->tmo)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &events_free);
+ }
+ }
+
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_splice_tail(&events, &cpts->events);
+ list_splice_tail(&events_free, &cpts->pool);
+ spin_unlock_irqrestore(&cpts->lock, flags);
+}
+
+static long cpts_overflow_check(struct ptp_clock_info *ptp)
+{
+ struct cpts *cpts = container_of(ptp, struct cpts, info);
+ unsigned long delay = cpts->ov_check_period;
+ unsigned long flags;
+ u64 ns;
+
+ mutex_lock(&cpts->ptp_clk_mutex);
+
+ cpts_update_cur_time(cpts, -1, NULL);
+ ns = timecounter_read(&cpts->tc);
+
+ cpts_process_events(cpts);
+
+ spin_lock_irqsave(&cpts->txq.lock, flags);
+ if (!skb_queue_empty(&cpts->txq)) {
+ cpts_purge_txq(cpts);
+ if (!skb_queue_empty(&cpts->txq))
+ delay = CPTS_SKB_TX_WORK_TIMEOUT;
+ }
+ spin_unlock_irqrestore(&cpts->txq.lock, flags);
+
+ dev_dbg(cpts->dev, "cpts overflow check at %lld\n", ns);
+ mutex_unlock(&cpts->ptp_clk_mutex);
+ return (long)delay;
+}
+
+static const struct ptp_clock_info cpts_info = {
+ .owner = THIS_MODULE,
+	.name		= "CPTS timer",
+ .max_adj = 1000000,
+ .n_ext_ts = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = cpts_ptp_adjfreq,
+ .adjtime = cpts_ptp_adjtime,
+ .gettimex64 = cpts_ptp_gettimeex,
+ .settime64 = cpts_ptp_settime,
+ .enable = cpts_ptp_enable,
+ .do_aux_work = cpts_overflow_check,
+};
+
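+/* The skb's PTP message type and sequence ID are packed into one u32 using
+ * the same bit layout as the EVENT_HIGH register fields (EVENT_TYPE,
+ * MESSAGE_TYPE, SEQUENCE_ID), so matching an skb against a CPTS event in
+ * cpts_find_ts()/cpts_match_tx_ts() is a single compare.
+ */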
+static int cpts_skb_get_mtype_seqid(struct sk_buff *skb, u32 *mtype_seqid)
+{
+ unsigned int ptp_class = ptp_classify_raw(skb);
+ struct ptp_header *hdr;
+ u8 msgtype;
+ u16 seqid;
+
+ if (ptp_class == PTP_CLASS_NONE)
+ return 0;
+
+ hdr = ptp_parse_header(skb, ptp_class);
+ if (!hdr)
+ return 0;
+
+ msgtype = ptp_get_msgtype(hdr, ptp_class);
+ seqid = ntohs(hdr->sequence_id);
+
+ *mtype_seqid = (msgtype & MESSAGE_TYPE_MASK) << MESSAGE_TYPE_SHIFT;
+ *mtype_seqid |= (seqid & SEQUENCE_ID_MASK) << SEQUENCE_ID_SHIFT;
+
+ return 1;
+}
+
+static u64 cpts_find_ts(struct cpts *cpts, struct sk_buff *skb,
+ int ev_type, u32 skb_mtype_seqid)
+{
+ struct list_head *this, *next;
+ struct cpts_event *event;
+ unsigned long flags;
+ u32 mtype_seqid;
+ u64 ns = 0;
+
+ cpts_fifo_read(cpts, -1);
+ spin_lock_irqsave(&cpts->lock, flags);
+ list_for_each_safe(this, next, &cpts->events) {
+ event = list_entry(this, struct cpts_event, list);
+ if (event_expired(event)) {
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ continue;
+ }
+
+ mtype_seqid = event->high &
+ ((MESSAGE_TYPE_MASK << MESSAGE_TYPE_SHIFT) |
+ (SEQUENCE_ID_MASK << SEQUENCE_ID_SHIFT) |
+ (EVENT_TYPE_MASK << EVENT_TYPE_SHIFT));
+
+ if (mtype_seqid == skb_mtype_seqid) {
+ ns = event->timestamp;
+ list_del_init(&event->list);
+ list_add(&event->list, &cpts->pool);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&cpts->lock, flags);
+
+ return ns;
+}
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ struct skb_shared_hwtstamps *ssh;
+ int ret;
+ u64 ns;
+
+	/* cpts_rx_timestamp() is called before eth_type_trans(), so the
+	 * skb MAC header is not set up yet; reset it here before parsing.
+	 */
+ skb_reset_mac_header(skb);
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_RX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ ns = cpts_find_ts(cpts, skb, CPTS_EV_RX, skb_cb->skb_mtype_seqid);
+ if (!ns)
+ return;
+ ssh = skb_hwtstamps(skb);
+ memset(ssh, 0, sizeof(*ssh));
+ ssh->hwtstamp = ns_to_ktime(ns);
+}
+EXPORT_SYMBOL_GPL(cpts_rx_timestamp);
+
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ struct cpts_skb_cb_data *skb_cb = (struct cpts_skb_cb_data *)skb->cb;
+ int ret;
+
+ if (!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))
+ return;
+
+ ret = cpts_skb_get_mtype_seqid(skb, &skb_cb->skb_mtype_seqid);
+ if (!ret)
+ return;
+
+ skb_cb->skb_mtype_seqid |= (CPTS_EV_TX << EVENT_TYPE_SHIFT);
+
+ dev_dbg(cpts->dev, "%s mtype seqid %08x\n",
+ __func__, skb_cb->skb_mtype_seqid);
+
+ /* Always defer TX TS processing to PTP worker */
+ skb_get(skb);
+ /* get the timestamp for timeouts */
+ skb_cb->tmo = jiffies + msecs_to_jiffies(CPTS_SKB_RX_TX_TMO);
+ skb_queue_tail(&cpts->txq, skb);
+ ptp_schedule_worker(cpts->clock, 0);
+}
+EXPORT_SYMBOL_GPL(cpts_tx_timestamp);
+
+int cpts_register(struct cpts *cpts)
+{
+ int err, i;
+
+ skb_queue_head_init(&cpts->txq);
+ INIT_LIST_HEAD(&cpts->events);
+ INIT_LIST_HEAD(&cpts->pool);
+ for (i = 0; i < CPTS_MAX_EVENTS; i++)
+ list_add(&cpts->pool_data[i].list, &cpts->pool);
+
+ err = clk_enable(cpts->refclk);
+ if (err)
+ return err;
+
+ cpts_write32(cpts, CPTS_EN, control);
+ cpts_write32(cpts, TS_PEND_EN, int_enable);
+
+ timecounter_init(&cpts->tc, &cpts->cc, ktime_get_real_ns());
+
+ cpts->clock = ptp_clock_register(&cpts->info, cpts->dev);
+ if (IS_ERR(cpts->clock)) {
+ err = PTR_ERR(cpts->clock);
+ cpts->clock = NULL;
+ goto err_ptp;
+ }
+ cpts->phc_index = ptp_clock_index(cpts->clock);
+
+ ptp_schedule_worker(cpts->clock, cpts->ov_check_period);
+ return 0;
+
+err_ptp:
+ clk_disable(cpts->refclk);
+ return err;
+}
+EXPORT_SYMBOL_GPL(cpts_register);
+
+void cpts_unregister(struct cpts *cpts)
+{
+ if (WARN_ON(!cpts->clock))
+ return;
+
+ ptp_clock_unregister(cpts->clock);
+ cpts->clock = NULL;
+ cpts->phc_index = -1;
+
+ cpts_write32(cpts, 0, int_enable);
+ cpts_write32(cpts, 0, control);
+
+ /* Drop all packet */
+ skb_queue_purge(&cpts->txq);
+
+ clk_disable(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_unregister);
+
+static void cpts_calc_mult_shift(struct cpts *cpts)
+{
+ u64 frac, maxsec, ns;
+ u32 freq;
+
+ freq = clk_get_rate(cpts->refclk);
+
+ /* Calc the maximum number of seconds which we can run before
+ * wrapping around.
+ */
+ maxsec = cpts->cc.mask;
+ do_div(maxsec, freq);
+	/* limit the conversion range to 10 sec, as higher values would
+	 * produce too small mult factors and so reduce the conversion
+	 * accuracy
+	 */
+ if (maxsec > 10)
+ maxsec = 10;
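+	/* e.g. (illustrative numbers): a 250 MHz refclk with a 32-bit counter
+	 * wraps after ~17 s, so maxsec is clamped to 10 s here and the
+	 * overflow check period below becomes 5 s.
+	 */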
+
+ /* Calc overflow check period (maxsec / 2) */
+ cpts->ov_check_period = (HZ * maxsec) / 2;
+ dev_info(cpts->dev, "cpts: overflow check period %lu (jiffies)\n",
+ cpts->ov_check_period);
+
+ if (cpts->cc.mult || cpts->cc.shift)
+ return;
+
+ clocks_calc_mult_shift(&cpts->cc.mult, &cpts->cc.shift,
+ freq, NSEC_PER_SEC, maxsec);
+
+ frac = 0;
+ ns = cyclecounter_cyc2ns(&cpts->cc, freq, cpts->cc.mask, &frac);
+
+ dev_info(cpts->dev,
+ "CPTS: ref_clk_freq:%u calc_mult:%u calc_shift:%u error:%lld nsec/sec\n",
+ freq, cpts->cc.mult, cpts->cc.shift, (ns - NSEC_PER_SEC));
+}
+
+static int cpts_of_mux_clk_setup(struct cpts *cpts, struct device_node *node)
+{
+ struct device_node *refclk_np;
+ const char **parent_names;
+ unsigned int num_parents;
+ struct clk_hw *clk_hw;
+ int ret = -EINVAL;
+ u32 *mux_table;
+
+ refclk_np = of_get_child_by_name(node, "cpts-refclk-mux");
+ if (!refclk_np)
+		/* refclk selection is not supported on all SoCs */
+ return 0;
+
+ num_parents = of_clk_get_parent_count(refclk_np);
+ if (num_parents < 1) {
+ dev_err(cpts->dev, "mux-clock %s must have parents\n",
+ refclk_np->name);
+ goto mux_fail;
+ }
+
+ parent_names = devm_kcalloc(cpts->dev, num_parents,
+ sizeof(*parent_names), GFP_KERNEL);
+
+ mux_table = devm_kcalloc(cpts->dev, num_parents, sizeof(*mux_table),
+ GFP_KERNEL);
+ if (!mux_table || !parent_names) {
+ ret = -ENOMEM;
+ goto mux_fail;
+ }
+
+ of_clk_parent_fill(refclk_np, parent_names, num_parents);
+
+ ret = of_property_read_variable_u32_array(refclk_np, "ti,mux-tbl",
+ mux_table,
+ num_parents, num_parents);
+ if (ret < 0)
+ goto mux_fail;
+
+ clk_hw = clk_hw_register_mux_table(cpts->dev, refclk_np->name,
+ parent_names, num_parents,
+ 0,
+ &cpts->reg->rftclk_sel, 0, 0x1F,
+ 0, mux_table, NULL);
+ if (IS_ERR(clk_hw)) {
+ ret = PTR_ERR(clk_hw);
+ goto mux_fail;
+ }
+
+ ret = devm_add_action_or_reset(cpts->dev,
+ (void(*)(void *))clk_hw_unregister_mux,
+ clk_hw);
+ if (ret) {
+		dev_err(cpts->dev, "add clkmux unreg action %d\n", ret);
+ goto mux_fail;
+ }
+
+ ret = of_clk_add_hw_provider(refclk_np, of_clk_hw_simple_get, clk_hw);
+ if (ret)
+ goto mux_fail;
+
+ ret = devm_add_action_or_reset(cpts->dev,
+ (void(*)(void *))of_clk_del_provider,
+ refclk_np);
+ if (ret) {
+		dev_err(cpts->dev, "add clkmux provider unreg action %d\n", ret);
+ goto mux_fail;
+ }
+
+ return ret;
+
+mux_fail:
+ of_node_put(refclk_np);
+ return ret;
+}
+
+static int cpts_of_parse(struct cpts *cpts, struct device_node *node)
+{
+ int ret = -EINVAL;
+ u32 prop;
+
+ if (!of_property_read_u32(node, "cpts_clock_mult", &prop))
+ cpts->cc.mult = prop;
+
+ if (!of_property_read_u32(node, "cpts_clock_shift", &prop))
+ cpts->cc.shift = prop;
+
+ if ((cpts->cc.mult && !cpts->cc.shift) ||
+ (!cpts->cc.mult && cpts->cc.shift))
+ goto of_error;
+
+ return cpts_of_mux_clk_setup(cpts, node);
+
+of_error:
+ dev_err(cpts->dev, "CPTS: Missing property in the DT.\n");
+ return ret;
+}
+
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node, u32 n_ext_ts)
+{
+ struct cpts *cpts;
+ int ret;
+
+ cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
+ if (!cpts)
+ return ERR_PTR(-ENOMEM);
+
+ cpts->dev = dev;
+ cpts->reg = (struct cpsw_cpts __iomem *)regs;
+ cpts->irq_poll = true;
+ spin_lock_init(&cpts->lock);
+ mutex_init(&cpts->ptp_clk_mutex);
+ init_completion(&cpts->ts_push_complete);
+
+ ret = cpts_of_parse(cpts, node);
+ if (ret)
+ return ERR_PTR(ret);
+
+ cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
+ if (IS_ERR(cpts->refclk))
+ /* try get clk from dev node for compatibility */
+ cpts->refclk = devm_clk_get(dev, "cpts");
+
+ if (IS_ERR(cpts->refclk)) {
+ dev_err(dev, "Failed to get cpts refclk %ld\n",
+ PTR_ERR(cpts->refclk));
+ return ERR_CAST(cpts->refclk);
+ }
+
+ ret = clk_prepare(cpts->refclk);
+ if (ret)
+ return ERR_PTR(ret);
+
+ cpts->cc.read = cpts_systim_read;
+ cpts->cc.mask = CLOCKSOURCE_MASK(32);
+ cpts->info = cpts_info;
+ cpts->phc_index = -1;
+
+ if (n_ext_ts)
+ cpts->info.n_ext_ts = n_ext_ts;
+
+ cpts_calc_mult_shift(cpts);
+ /* save cc.mult original value as it can be modified
+ * by cpts_ptp_adjfreq().
+ */
+ cpts->cc_mult = cpts->cc.mult;
+
+ return cpts;
+}
+EXPORT_SYMBOL_GPL(cpts_create);
+
+void cpts_release(struct cpts *cpts)
+{
+ if (!cpts)
+ return;
+
+ if (WARN_ON(!cpts->refclk))
+ return;
+
+ clk_unprepare(cpts->refclk);
+}
+EXPORT_SYMBOL_GPL(cpts_release);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI CPTS driver");
+MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>");
diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h
new file mode 100644
index 000000000..07222f651
--- /dev/null
+++ b/drivers/net/ethernet/ti/cpts.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * TI Common Platform Time Sync
+ *
+ * Copyright (C) 2012 Richard Cochran <richardcochran@gmail.com>
+ *
+ */
+#ifndef _TI_CPTS_H_
+#define _TI_CPTS_H_
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/clocksource.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/skbuff.h>
+#include <linux/ptp_classify.h>
+#include <linux/timecounter.h>
+
+struct cpsw_cpts {
+ u32 idver; /* Identification and version */
+ u32 control; /* Time sync control */
+ u32 rftclk_sel; /* Reference Clock Select Register */
+ u32 ts_push; /* Time stamp event push */
+ u32 ts_load_val; /* Time stamp load value */
+ u32 ts_load_en; /* Time stamp load enable */
+ u32 res2[2];
+ u32 intstat_raw; /* Time sync interrupt status raw */
+ u32 intstat_masked; /* Time sync interrupt status masked */
+ u32 int_enable; /* Time sync interrupt enable */
+ u32 res3;
+ u32 event_pop; /* Event interrupt pop */
+ u32 event_low; /* 32 Bit Event Time Stamp */
+ u32 event_high; /* Event Type Fields */
+};
+
+/* Bit definitions for the IDVER register */
+#define TX_IDENT_SHIFT (16) /* TX Identification Value */
+#define TX_IDENT_MASK (0xffff)
+#define RTL_VER_SHIFT (11) /* RTL Version Value */
+#define RTL_VER_MASK (0x1f)
+#define MAJOR_VER_SHIFT (8) /* Major Version Value */
+#define MAJOR_VER_MASK (0x7)
+#define MINOR_VER_SHIFT (0) /* Minor Version Value */
+#define MINOR_VER_MASK (0xff)
+
+/* Bit definitions for the CONTROL register */
+#define HW4_TS_PUSH_EN (1<<11) /* Hardware push 4 enable */
+#define HW3_TS_PUSH_EN (1<<10) /* Hardware push 3 enable */
+#define HW2_TS_PUSH_EN (1<<9) /* Hardware push 2 enable */
+#define HW1_TS_PUSH_EN (1<<8) /* Hardware push 1 enable */
+#define INT_TEST (1<<1) /* Interrupt Test */
+#define CPTS_EN (1<<0) /* Time Sync Enable */
+
+/*
+ * Definitions for the single-bit registers:
+ * TS_PUSH TS_LOAD_EN INTSTAT_RAW INTSTAT_MASKED INT_ENABLE EVENT_POP
+ */
+#define TS_PUSH (1<<0) /* Time stamp event push */
+#define TS_LOAD_EN (1<<0) /* Time Stamp Load */
+#define TS_PEND_RAW (1<<0) /* int read (before enable) */
+#define TS_PEND (1<<0) /* masked interrupt read (after enable) */
+#define TS_PEND_EN (1<<0) /* masked interrupt enable */
+#define EVENT_POP (1<<0) /* writing discards one event */
+
+/* Bit definitions for the EVENT_HIGH register */
+#define PORT_NUMBER_SHIFT (24) /* Indicates Ethernet port or HW pin */
+#define PORT_NUMBER_MASK (0x1f)
+#define EVENT_TYPE_SHIFT (20) /* Time sync event type */
+#define EVENT_TYPE_MASK (0xf)
+#define MESSAGE_TYPE_SHIFT (16) /* PTP message type */
+#define MESSAGE_TYPE_MASK (0xf)
+#define SEQUENCE_ID_SHIFT (0) /* PTP message sequence ID */
+#define SEQUENCE_ID_MASK (0xffff)
+
+enum {
+ CPTS_EV_PUSH, /* Time Stamp Push Event */
+ CPTS_EV_ROLL, /* Time Stamp Rollover Event */
+ CPTS_EV_HALF, /* Time Stamp Half Rollover Event */
+ CPTS_EV_HW, /* Hardware Time Stamp Push Event */
+ CPTS_EV_RX, /* Ethernet Receive Event */
+ CPTS_EV_TX, /* Ethernet Transmit Event */
+};
+
+#define CPTS_FIFO_DEPTH 16
+#define CPTS_MAX_EVENTS 32
+
+struct cpts_event {
+ struct list_head list;
+ unsigned long tmo;
+ u32 high;
+ u32 low;
+ u64 timestamp;
+};
+
+struct cpts {
+ struct device *dev;
+ struct cpsw_cpts __iomem *reg;
+ int tx_enable;
+ int rx_enable;
+ struct ptp_clock_info info;
+ struct ptp_clock *clock;
+ spinlock_t lock; /* protects fifo/events */
+ u32 cc_mult; /* for the nominal frequency */
+ struct cyclecounter cc;
+ struct timecounter tc;
+ int phc_index;
+ struct clk *refclk;
+ struct list_head events;
+ struct list_head pool;
+ struct cpts_event pool_data[CPTS_MAX_EVENTS];
+ unsigned long ov_check_period;
+ struct sk_buff_head txq;
+ u64 cur_timestamp;
+ u32 mult_new;
+ struct mutex ptp_clk_mutex; /* sync PTP interface and worker */
+ bool irq_poll;
+ struct completion ts_push_complete;
+ u32 hw_ts_enable;
+};
+
+void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb);
+int cpts_register(struct cpts *cpts);
+void cpts_unregister(struct cpts *cpts);
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node, u32 n_ext_ts);
+void cpts_release(struct cpts *cpts);
+void cpts_misc_interrupt(struct cpts *cpts);
+
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ unsigned int class = ptp_classify_raw(skb);
+
+ if (class == PTP_CLASS_NONE)
+ return false;
+
+ return true;
+}
+
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+ cpts->irq_poll = en;
+}
+
+#else
+struct cpts;
+
+static inline void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+static inline void cpts_tx_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+}
+
+static inline
+struct cpts *cpts_create(struct device *dev, void __iomem *regs,
+ struct device_node *node, u32 n_ext_ts)
+{
+ return NULL;
+}
+
+static inline void cpts_release(struct cpts *cpts)
+{
+}
+
+static inline int
+cpts_register(struct cpts *cpts)
+{
+ return 0;
+}
+
+static inline void cpts_unregister(struct cpts *cpts)
+{
+}
+
+static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb)
+{
+ return false;
+}
+
+static inline void cpts_misc_interrupt(struct cpts *cpts)
+{
+}
+
+static inline void cpts_set_irqpoll(struct cpts *cpts, bool en)
+{
+}
+#endif
+
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
new file mode 100644
index 000000000..d2eab5cd1
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -0,0 +1,1444 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/genalloc.h>
+#include "davinci_cpdma.h"
+
+/* DMA Registers */
+#define CPDMA_TXIDVER 0x00
+#define CPDMA_TXCONTROL 0x04
+#define CPDMA_TXTEARDOWN 0x08
+#define CPDMA_RXIDVER 0x10
+#define CPDMA_RXCONTROL 0x14
+#define CPDMA_SOFTRESET 0x1c
+#define CPDMA_RXTEARDOWN 0x18
+#define CPDMA_TX_PRI0_RATE 0x30
+#define CPDMA_TXINTSTATRAW 0x80
+#define CPDMA_TXINTSTATMASKED 0x84
+#define CPDMA_TXINTMASKSET 0x88
+#define CPDMA_TXINTMASKCLEAR 0x8c
+#define CPDMA_MACINVECTOR 0x90
+#define CPDMA_MACEOIVECTOR 0x94
+#define CPDMA_RXINTSTATRAW 0xa0
+#define CPDMA_RXINTSTATMASKED 0xa4
+#define CPDMA_RXINTMASKSET 0xa8
+#define CPDMA_RXINTMASKCLEAR 0xac
+#define CPDMA_DMAINTSTATRAW 0xb0
+#define CPDMA_DMAINTSTATMASKED 0xb4
+#define CPDMA_DMAINTMASKSET 0xb8
+#define CPDMA_DMAINTMASKCLEAR 0xbc
+#define CPDMA_DMAINT_HOSTERR BIT(1)
+
+/* the following exist only if has_ext_regs is set */
+#define CPDMA_DMACONTROL 0x20
+#define CPDMA_DMASTATUS 0x24
+#define CPDMA_RXBUFFOFS 0x28
+#define CPDMA_EM_CONTROL 0x2c
+
+/* Descriptor mode bits */
+#define CPDMA_DESC_SOP BIT(31)
+#define CPDMA_DESC_EOP BIT(30)
+#define CPDMA_DESC_OWNER BIT(29)
+#define CPDMA_DESC_EOQ BIT(28)
+#define CPDMA_DESC_TD_COMPLETE BIT(27)
+#define CPDMA_DESC_PASS_CRC BIT(26)
+#define CPDMA_DESC_TO_PORT_EN BIT(20)
+#define CPDMA_TO_PORT_SHIFT 16
+#define CPDMA_DESC_PORT_MASK (BIT(18) | BIT(17) | BIT(16))
+#define CPDMA_DESC_CRC_LEN 4
+
+#define CPDMA_TEARDOWN_VALUE 0xfffffffc
+
+#define CPDMA_MAX_RLIM_CNT 16384
+
+struct cpdma_desc {
+ /* hardware fields */
+ u32 hw_next;
+ u32 hw_buffer;
+ u32 hw_len;
+ u32 hw_mode;
+ /* software fields */
+ void *sw_token;
+ u32 sw_buffer;
+ u32 sw_len;
+};
+
+struct cpdma_desc_pool {
+ phys_addr_t phys;
+ dma_addr_t hw_addr;
+ void __iomem *iomap; /* ioremap map */
+ void *cpumap; /* dma_alloc map */
+ int desc_size, mem_size;
+ int num_desc;
+ struct device *dev;
+ struct gen_pool *gen_pool;
+};
+
+enum cpdma_state {
+ CPDMA_STATE_IDLE,
+ CPDMA_STATE_ACTIVE,
+ CPDMA_STATE_TEARDOWN,
+};
+
+struct cpdma_ctlr {
+ enum cpdma_state state;
+ struct cpdma_params params;
+ struct device *dev;
+ struct cpdma_desc_pool *pool;
+ spinlock_t lock;
+ struct cpdma_chan *channels[2 * CPDMA_MAX_CHANNELS];
+ int chan_num;
+ int num_rx_desc; /* RX descriptors number */
+ int num_tx_desc; /* TX descriptors number */
+};
+
+struct cpdma_chan {
+ struct cpdma_desc __iomem *head, *tail;
+ void __iomem *hdp, *cp, *rxfree;
+ enum cpdma_state state;
+ struct cpdma_ctlr *ctlr;
+ int chan_num;
+ spinlock_t lock;
+ int count;
+ u32 desc_num;
+ u32 mask;
+ cpdma_handler_fn handler;
+ enum dma_data_direction dir;
+ struct cpdma_chan_stats stats;
+ /* offsets into dmaregs */
+ int int_set, int_clear, td;
+ int weight;
+ u32 rate_factor;
+ u32 rate;
+};
+
+struct cpdma_control_info {
+ u32 reg;
+ u32 shift, mask;
+ int access;
+#define ACCESS_RO BIT(0)
+#define ACCESS_WO BIT(1)
+#define ACCESS_RW (ACCESS_RO | ACCESS_WO)
+};
+
+struct submit_info {
+ struct cpdma_chan *chan;
+ int directed;
+ void *token;
+ void *data_virt;
+ dma_addr_t data_dma;
+ int len;
+};
+
+static struct cpdma_control_info controls[] = {
+ [CPDMA_TX_RLIM] = {CPDMA_DMACONTROL, 8, 0xffff, ACCESS_RW},
+ [CPDMA_CMD_IDLE] = {CPDMA_DMACONTROL, 3, 1, ACCESS_WO},
+ [CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL, 4, 1, ACCESS_RW},
+ [CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL, 2, 1, ACCESS_RW},
+ [CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL, 1, 1, ACCESS_RW},
+ [CPDMA_TX_PRIO_FIXED] = {CPDMA_DMACONTROL, 0, 1, ACCESS_RW},
+ [CPDMA_STAT_IDLE] = {CPDMA_DMASTATUS, 31, 1, ACCESS_RO},
+ [CPDMA_STAT_TX_ERR_CODE] = {CPDMA_DMASTATUS, 20, 0xf, ACCESS_RW},
+ [CPDMA_STAT_TX_ERR_CHAN] = {CPDMA_DMASTATUS, 16, 0x7, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CODE] = {CPDMA_DMASTATUS, 12, 0xf, ACCESS_RW},
+ [CPDMA_STAT_RX_ERR_CHAN] = {CPDMA_DMASTATUS, 8, 0x7, ACCESS_RW},
+ [CPDMA_RX_BUFFER_OFFSET] = {CPDMA_RXBUFFOFS, 0, 0xffff, ACCESS_RW},
+};
+
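+
+/* ctlr->channels[] keeps TX channels in slots [0, CPDMA_MAX_CHANNELS) and
+ * RX channels in slots [CPDMA_MAX_CHANNELS, 2 * CPDMA_MAX_CHANNELS). The
+ * helpers below map a linear hardware channel number to/from its slot.
+ */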
+#define tx_chan_num(chan) (chan)
+#define rx_chan_num(chan) ((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan) ((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan) (!is_rx_chan(chan))
+#define __chan_linear(chan_num) ((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan) __chan_linear((chan)->chan_num)
+
+/* The following make access to common cpdma_ctlr params more readable */
+#define dmaregs params.dmaregs
+#define num_chan params.num_chan
+
+/* various accessors */
+#define dma_reg_read(ctlr, ofs) readl((ctlr)->dmaregs + (ofs))
+#define chan_read(chan, fld) readl((chan)->fld)
+#define desc_read(desc, fld) readl(&(desc)->fld)
+#define dma_reg_write(ctlr, ofs, v) writel(v, (ctlr)->dmaregs + (ofs))
+#define chan_write(chan, fld, v) writel(v, (chan)->fld)
+#define desc_write(desc, fld, v) writel((u32)(v), &(desc)->fld)
+
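+
+/* For TX channels, a 'directed' value of 1 or 2 marks the descriptor for
+ * directed transmission to that port by setting TO_PORT_EN and the port
+ * number in the mode word; other values leave the mode word untouched.
+ */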
+#define cpdma_desc_to_port(chan, mode, directed) \
+ do { \
+ if (!is_rx_chan(chan) && ((directed == 1) || \
+ (directed == 2))) \
+ mode |= (CPDMA_DESC_TO_PORT_EN | \
+ (directed << CPDMA_TO_PORT_SHIFT)); \
+ } while (0)
+
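+
+/* Flag ORed into a descriptor's sw_len when the buffer was DMA-mapped by
+ * the caller; on completion the buffer is then only synced for the CPU
+ * instead of being unmapped.
+ */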
+#define CPDMA_DMA_EXT_MAP BIT(16)
+
+static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
+{
+ struct cpdma_desc_pool *pool = ctlr->pool;
+
+ if (!pool)
+ return;
+
+ WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+ "cpdma_desc_pool size %zd != avail %zd",
+ gen_pool_size(pool->gen_pool),
+ gen_pool_avail(pool->gen_pool));
+ if (pool->cpumap)
+ dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
+ pool->phys);
+}
+
+/*
+ * Utility constructs for a cpdma descriptor pool. Some devices (e.g. davinci
+ * emac) have dedicated on-chip memory for these descriptors. Some other
+ * devices (e.g. cpsw switches) use plain old memory. Descriptor pools
+ * abstract out these details.
+ */
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+{
+ struct cpdma_params *cpdma_params = &ctlr->params;
+ struct cpdma_desc_pool *pool;
+ int ret = -ENOMEM;
+
+ pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ goto gen_pool_create_fail;
+ ctlr->pool = pool;
+
+ pool->mem_size = cpdma_params->desc_mem_size;
+ pool->desc_size = ALIGN(sizeof(struct cpdma_desc),
+ cpdma_params->desc_align);
+ pool->num_desc = pool->mem_size / pool->desc_size;
+
+ if (cpdma_params->descs_pool_size) {
+ /* Recalculate the memory size required for the cpdma descriptor
+ * pool based on the number of descriptors specified by the user.
+ * If that size exceeds the CPPI internal RAM size (desc_mem_size),
+ * then switch to using DDR.
+ */
+ pool->num_desc = cpdma_params->descs_pool_size;
+ pool->mem_size = pool->desc_size * pool->num_desc;
+ if (pool->mem_size > cpdma_params->desc_mem_size)
+ cpdma_params->desc_mem_phys = 0;
+ }
+
+ pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
+ -1, "cpdma");
+ if (IS_ERR(pool->gen_pool)) {
+ ret = PTR_ERR(pool->gen_pool);
+ dev_err(ctlr->dev, "pool create failed %d\n", ret);
+ goto gen_pool_create_fail;
+ }
+
+ if (cpdma_params->desc_mem_phys) {
+ pool->phys = cpdma_params->desc_mem_phys;
+ pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
+ pool->mem_size);
+ pool->hw_addr = cpdma_params->desc_hw_addr;
+ } else {
+ pool->cpumap = dma_alloc_coherent(ctlr->dev, pool->mem_size,
+ &pool->hw_addr, GFP_KERNEL);
+ pool->iomap = (void __iomem __force *)pool->cpumap;
+ pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
+ }
+
+ if (!pool->iomap)
+ goto gen_pool_create_fail;
+
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
+ pool->phys, pool->mem_size, -1);
+ if (ret < 0) {
+ dev_err(ctlr->dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
+ }
+
+ return 0;
+
+gen_pool_add_virt_fail:
+ cpdma_desc_pool_destroy(ctlr);
+gen_pool_create_fail:
+ ctlr->pool = NULL;
+ return ret;
+}
+
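+
+/* Descriptors live either in dedicated CPPI RAM (ioremapped from
+ * desc_mem_phys) or in DMA-coherent memory. These helpers translate between
+ * a CPU-side descriptor pointer and the bus address programmed into the
+ * hardware, using the descriptor's offset within the pool.
+ */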
+static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc)
+{
+ if (!desc)
+ return 0;
+ return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
+}
+
+static inline struct cpdma_desc __iomem *
+desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->iomap + dma - pool->hw_addr : NULL;
+}
+
+static struct cpdma_desc __iomem *
+cpdma_desc_alloc(struct cpdma_desc_pool *pool)
+{
+ return (struct cpdma_desc __iomem *)
+ gen_pool_alloc(pool->gen_pool, pool->desc_size);
+}
+
+static void cpdma_desc_free(struct cpdma_desc_pool *pool,
+ struct cpdma_desc __iomem *desc, int num_desc)
+{
+ gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
+}
+
+static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ struct cpdma_control_info *info = &controls[control];
+ u32 val;
+
+ if (!ctlr->params.has_ext_regs)
+ return -ENOTSUPP;
+
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ return -ENOENT;
+
+ if ((info->access & ACCESS_WO) != ACCESS_WO)
+ return -EPERM;
+
+ val = dma_reg_read(ctlr, info->reg);
+ val &= ~(info->mask << info->shift);
+ val |= (value & info->mask) << info->shift;
+ dma_reg_write(ctlr, info->reg, val);
+
+ return 0;
+}
+
+static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ struct cpdma_control_info *info = &controls[control];
+ int ret;
+
+ if (!ctlr->params.has_ext_regs)
+ return -ENOTSUPP;
+
+ if (ctlr->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ if (control < 0 || control >= ARRAY_SIZE(controls))
+ return -ENOENT;
+
+ if ((info->access & ACCESS_RO) != ACCESS_RO)
+ return -EPERM;
+
+ ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
+ return ret;
+}
+
+/* cpdma_chan_set_chan_shaper - set shaper for a channel
+ * Has to be called under ctlr lock
+ */
+static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ u32 rate_reg;
+ u32 rmask;
+ int ret;
+
+ if (!chan->rate)
+ return 0;
+
+ rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
+ dma_reg_write(ctlr, rate_reg, chan->rate_factor);
+
+ rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
+ rmask |= chan->mask;
+
+ ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
+ return ret;
+}
+
+static int cpdma_chan_on(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EBUSY;
+ }
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+ dma_reg_write(ctlr, chan->int_set, chan->mask);
+ chan->state = CPDMA_STATE_ACTIVE;
+ if (chan->head) {
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ if (chan->rxfree)
+ chan_write(chan, rxfree, chan->count);
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+/* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
+ * rmask - mask of rate limited channels
+ * Returns 0 on success or -EINVAL if the requested rate cannot be applied.
+ */
+static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
+ u32 *rmask, int *prio_mode)
+{
+ struct cpdma_ctlr *ctlr = ch->ctlr;
+ struct cpdma_chan *chan;
+ u32 old_rate = ch->rate;
+ u32 new_rmask = 0;
+ int rlim = 0;
+ int i;
+
+ for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
+ chan = ctlr->channels[i];
+ if (!chan)
+ continue;
+
+ if (chan == ch)
+ chan->rate = rate;
+
+ if (chan->rate) {
+ rlim = 1;
+ new_rmask |= chan->mask;
+ continue;
+ }
+
+ if (rlim)
+ goto err;
+ }
+
+ *rmask = new_rmask;
+ *prio_mode = rlim;
+ return 0;
+
+err:
+ ch->rate = old_rate;
+ dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
+ chan->chan_num);
+ return -EINVAL;
+}
+
+static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
+ struct cpdma_chan *ch)
+{
+ u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
+ u32 best_send_cnt = 0, best_idle_cnt = 0;
+ u32 new_rate, best_rate = 0, rate_reg;
+ u64 send_cnt, idle_cnt;
+ u32 min_send_cnt, freq;
+ u64 divident, divisor;
+
+ if (!ch->rate) {
+ ch->rate_factor = 0;
+ goto set_factor;
+ }
+
+ freq = ctlr->params.bus_freq_mhz * 1000 * 32;
+ if (!freq) {
+ dev_err(ctlr->dev, "The bus frequency is not set\n");
+ return -EINVAL;
+ }
+
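+
+ /* Search for a (send_cnt, idle_cnt) pair, bounded by CPDMA_MAX_RLIM_CNT,
+ * whose resulting rate is as close as possible to, but not below, the
+ * requested one; the best pair is packed into rate_factor as
+ * send_cnt | (idle_cnt << 16).
+ */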
+ min_send_cnt = freq - ch->rate;
+ send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
+ while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
+ divident = ch->rate * send_cnt;
+ divisor = min_send_cnt;
+ idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);
+
+ divident = freq * idle_cnt;
+ divisor = idle_cnt + send_cnt;
+ new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);
+
+ delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
+ if (delta < best_delta) {
+ best_delta = delta;
+ best_send_cnt = send_cnt;
+ best_idle_cnt = idle_cnt;
+ best_rate = new_rate;
+
+ if (!delta)
+ break;
+ }
+
+ if (prev_delta >= delta) {
+ prev_delta = delta;
+ send_cnt++;
+ continue;
+ }
+
+ idle_cnt++;
+ divident = freq * idle_cnt;
+ send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
+ send_cnt -= idle_cnt;
+ prev_delta = UINT_MAX;
+ }
+
+ ch->rate = best_rate;
+ ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);
+
+set_factor:
+ rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
+ dma_reg_write(ctlr, rate_reg, ch->rate_factor);
+ return 0;
+}
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
+{
+ struct cpdma_ctlr *ctlr;
+
+ ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
+ if (!ctlr)
+ return NULL;
+
+ ctlr->state = CPDMA_STATE_IDLE;
+ ctlr->params = *params;
+ ctlr->dev = params->dev;
+ ctlr->chan_num = 0;
+ spin_lock_init(&ctlr->lock);
+
+ if (cpdma_desc_pool_create(ctlr))
+ return NULL;
+ /* split pool equally between RX/TX by default */
+ ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
+ ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
+
+ if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
+ ctlr->num_chan = CPDMA_MAX_CHANNELS;
+ return ctlr;
+}
+
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
+{
+ struct cpdma_chan *chan;
+ unsigned long flags;
+ int i, prio_mode;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_IDLE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EBUSY;
+ }
+
+ if (ctlr->params.has_soft_reset) {
+ unsigned timeout = 10 * 100;
+
+ dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
+ while (timeout) {
+ if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
+ break;
+ udelay(10);
+ timeout--;
+ }
+ WARN_ON(!timeout);
+ }
+
+ for (i = 0; i < ctlr->num_chan; i++) {
+ writel(0, ctlr->params.txhdp + 4 * i);
+ writel(0, ctlr->params.rxhdp + 4 * i);
+ writel(0, ctlr->params.txcp + 4 * i);
+ writel(0, ctlr->params.rxcp + 4 * i);
+ }
+
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
+
+ ctlr->state = CPDMA_STATE_ACTIVE;
+
+ prio_mode = 0;
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ chan = ctlr->channels[i];
+ if (chan) {
+ cpdma_chan_set_chan_shaper(chan);
+ cpdma_chan_on(chan);
+
+ /* fixed prio mode is left off only if all tx channels are rate limited */
+ if (is_tx_chan(chan) && !chan->rate)
+ prio_mode = 1;
+ }
+ }
+
+ _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
+ _cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ ctlr->state = CPDMA_STATE_TEARDOWN;
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_stop(ctlr->channels[i]);
+ }
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
+ dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
+
+ dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
+ dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
+
+ ctlr->state = CPDMA_STATE_IDLE;
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
+{
+ int ret = 0, i;
+
+ if (!ctlr)
+ return -EINVAL;
+
+ if (ctlr->state != CPDMA_STATE_IDLE)
+ cpdma_ctlr_stop(ctlr);
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
+ cpdma_chan_destroy(ctlr->channels[i]);
+
+ cpdma_desc_pool_destroy(ctlr);
+ return ret;
+}
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
+{
+ unsigned long flags;
+ int i;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ if (ctlr->channels[i])
+ cpdma_chan_int_ctrl(ctlr->channels[i], enable);
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
+{
+ dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
+}
+
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
+{
+ return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
+}
+
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
+{
+ return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
+}
+
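+
+/* Distribute desc_num descriptors among the RX or TX channels: a channel
+ * with a non-zero weight gets weight% of the total, channels without a
+ * weight get per_ch_desc each, and any remainder goes to the channel that
+ * ended up with the most descriptors.
+ */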
+static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
+ int rx, int desc_num,
+ int per_ch_desc)
+{
+ struct cpdma_chan *chan, *most_chan = NULL;
+ int desc_cnt = desc_num;
+ int most_dnum = 0;
+ int min, max, i;
+
+ if (!desc_num)
+ return;
+
+ if (rx) {
+ min = rx_chan_num(0);
+ max = rx_chan_num(CPDMA_MAX_CHANNELS);
+ } else {
+ min = tx_chan_num(0);
+ max = tx_chan_num(CPDMA_MAX_CHANNELS);
+ }
+
+ for (i = min; i < max; i++) {
+ chan = ctlr->channels[i];
+ if (!chan)
+ continue;
+
+ if (chan->weight)
+ chan->desc_num = (chan->weight * desc_num) / 100;
+ else
+ chan->desc_num = per_ch_desc;
+
+ desc_cnt -= chan->desc_num;
+
+ if (most_dnum < chan->desc_num) {
+ most_dnum = chan->desc_num;
+ most_chan = chan;
+ }
+ }
+ /* hand the remaining descriptors to the busiest channel */
+ if (most_chan)
+ most_chan->desc_num += desc_cnt;
+}
+
+/*
+ * cpdma_chan_split_pool - Splits the controller descriptor pool between all
+ * channels. Has to be called under ctlr lock.
+ */
+static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+{
+ int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
+ int free_rx_num = 0, free_tx_num = 0;
+ int rx_weight = 0, tx_weight = 0;
+ int tx_desc_num, rx_desc_num;
+ struct cpdma_chan *chan;
+ int i;
+
+ if (!ctlr->chan_num)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+ chan = ctlr->channels[i];
+ if (!chan)
+ continue;
+
+ if (is_rx_chan(chan)) {
+ if (!chan->weight)
+ free_rx_num++;
+ rx_weight += chan->weight;
+ } else {
+ if (!chan->weight)
+ free_tx_num++;
+ tx_weight += chan->weight;
+ }
+ }
+
+ if (rx_weight > 100 || tx_weight > 100)
+ return -EINVAL;
+
+ tx_desc_num = ctlr->num_tx_desc;
+ rx_desc_num = ctlr->num_rx_desc;
+
+ if (free_tx_num) {
+ tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
+ tx_per_ch_desc /= free_tx_num;
+ }
+ if (free_rx_num) {
+ rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
+ rx_per_ch_desc /= free_rx_num;
+ }
+
+ cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
+ cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
+
+ return 0;
+}
+
+/* cpdma_chan_set_weight - set the weight of a channel, in percent.
+ * Tx and Rx channels have separate weight budgets of 100% each. The weight
+ * is used to split cpdma resources, including the number of descriptors,
+ * in the proportion required by the channels. The channel rate alone is
+ * not enough to derive the weight, as the maximum rate of the interface
+ * would also be needed. If weight = 0, the channel uses whatever
+ * descriptors are left over by the weighted channels.
+ */
+int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
+{
+ struct cpdma_ctlr *ctlr = ch->ctlr;
+ unsigned long flags, ch_flags;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ spin_lock_irqsave(&ch->lock, ch_flags);
+ if (ch->weight == weight) {
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+ }
+ ch->weight = weight;
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+
+ /* re-split pool using new channel weight */
+ ret = cpdma_chan_split_pool(ctlr);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+/* cpdma_chan_get_min_rate - get minimum allowed rate for channel
+ * Should be called before cpdma_chan_set_rate.
+ * Returns min rate in Kb/s
+ */
+u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
+{
+ unsigned int divident, divisor;
+
+ divident = ctlr->params.bus_freq_mhz * 32 * 1000;
+ divisor = 1 + CPDMA_MAX_RLIM_CNT;
+
+ return DIV_ROUND_UP(divident, divisor);
+}
+
+/* cpdma_chan_set_rate - limits bandwidth for transmit channel.
+ * The bandwidth limited channels have to be in order beginning from lowest.
+ * ch - transmit channel the bandwidth is configured for
+ * rate - bandwidth in Kb/s; if 0, the shaper is switched off
+ */
+int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
+{
+ unsigned long flags, ch_flags;
+ struct cpdma_ctlr *ctlr;
+ int ret, prio_mode;
+ u32 rmask;
+
+ if (!ch || !is_tx_chan(ch))
+ return -EINVAL;
+
+ if (ch->rate == rate)
+ return rate;
+
+ ctlr = ch->ctlr;
+ spin_lock_irqsave(&ctlr->lock, flags);
+ spin_lock_irqsave(&ch->lock, ch_flags);
+
+ ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
+ if (ret)
+ goto err;
+
+ ret = cpdma_chan_set_factors(ctlr, ch);
+ if (ret)
+ goto err;
+
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+
+ /* on shapers */
+ _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
+ _cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+
+err:
+ spin_unlock_irqrestore(&ch->lock, ch_flags);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return ret;
+}
+
+u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
+{
+ unsigned long flags;
+ u32 rate;
+
+ spin_lock_irqsave(&ch->lock, flags);
+ rate = ch->rate;
+ spin_unlock_irqrestore(&ch->lock, flags);
+
+ return rate;
+}
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler, int rx_type)
+{
+ int offset = chan_num * 4;
+ struct cpdma_chan *chan;
+ unsigned long flags;
+
+ chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
+
+ if (__chan_linear(chan_num) >= ctlr->num_chan)
+ return ERR_PTR(-EINVAL);
+
+ chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (ctlr->channels[chan_num]) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ devm_kfree(ctlr->dev, chan);
+ return ERR_PTR(-EBUSY);
+ }
+
+ chan->ctlr = ctlr;
+ chan->state = CPDMA_STATE_IDLE;
+ chan->chan_num = chan_num;
+ chan->handler = handler;
+ chan->rate = 0;
+ chan->weight = 0;
+
+ if (is_rx_chan(chan)) {
+ chan->hdp = ctlr->params.rxhdp + offset;
+ chan->cp = ctlr->params.rxcp + offset;
+ chan->rxfree = ctlr->params.rxfree + offset;
+ chan->int_set = CPDMA_RXINTMASKSET;
+ chan->int_clear = CPDMA_RXINTMASKCLEAR;
+ chan->td = CPDMA_RXTEARDOWN;
+ chan->dir = DMA_FROM_DEVICE;
+ } else {
+ chan->hdp = ctlr->params.txhdp + offset;
+ chan->cp = ctlr->params.txcp + offset;
+ chan->int_set = CPDMA_TXINTMASKSET;
+ chan->int_clear = CPDMA_TXINTMASKCLEAR;
+ chan->td = CPDMA_TXTEARDOWN;
+ chan->dir = DMA_TO_DEVICE;
+ }
+ chan->mask = BIT(chan_linear(chan));
+
+ spin_lock_init(&chan->lock);
+
+ ctlr->channels[chan_num] = chan;
+ ctlr->chan_num++;
+
+ cpdma_chan_split_pool(ctlr);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return chan;
+}
+
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
+{
+ unsigned long flags;
+ int desc_num;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ desc_num = chan->desc_num;
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return desc_num;
+}
+
+int cpdma_chan_destroy(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr;
+ unsigned long flags;
+
+ if (!chan)
+ return -EINVAL;
+ ctlr = chan->ctlr;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ if (chan->state != CPDMA_STATE_IDLE)
+ cpdma_chan_stop(chan);
+ ctlr->channels[chan->chan_num] = NULL;
+ ctlr->chan_num--;
+ devm_kfree(ctlr->dev, chan);
+ cpdma_chan_split_pool(ctlr);
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats)
+{
+ unsigned long flags;
+ if (!chan)
+ return -EINVAL;
+ spin_lock_irqsave(&chan->lock, flags);
+ memcpy(stats, &chan->stats, sizeof(*stats));
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+static void __cpdma_chan_submit(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *prev = chan->tail;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ u32 mode;
+
+ desc_dma = desc_phys(pool, desc);
+
+ /* simple case - idle channel */
+ if (!chan->head) {
+ chan->stats.head_enqueue++;
+ chan->head = desc;
+ chan->tail = desc;
+ if (chan->state == CPDMA_STATE_ACTIVE)
+ chan_write(chan, hdp, desc_dma);
+ return;
+ }
+
+ /* first chain the descriptor at the tail of the list */
+ desc_write(prev, hw_next, desc_dma);
+ chan->tail = desc;
+ chan->stats.tail_enqueue++;
+
+ /* next check if EOQ has been triggered already */
+ mode = desc_read(prev, hw_mode);
+ if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
+ (chan->state == CPDMA_STATE_ACTIVE)) {
+ desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
+ chan_write(chan, hdp, desc_dma);
+ chan->stats.misqueued++;
+ }
+}
+
+static int cpdma_chan_submit_si(struct submit_info *si)
+{
+ struct cpdma_chan *chan = si->chan;
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ int len = si->len;
+ struct cpdma_desc __iomem *desc;
+ dma_addr_t buffer;
+ u32 mode;
+ int ret;
+
+ if (chan->count >= chan->desc_num) {
+ chan->stats.desc_alloc_fail++;
+ return -ENOMEM;
+ }
+
+ desc = cpdma_desc_alloc(ctlr->pool);
+ if (!desc) {
+ chan->stats.desc_alloc_fail++;
+ return -ENOMEM;
+ }
+
+ if (len < ctlr->params.min_packet_size) {
+ len = ctlr->params.min_packet_size;
+ chan->stats.runt_transmit_buff++;
+ }
+
+ mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
+ cpdma_desc_to_port(chan, mode, si->directed);
+
+ if (si->data_dma) {
+ buffer = si->data_dma;
+ dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
+ } else {
+ buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
+ ret = dma_mapping_error(ctlr->dev, buffer);
+ if (ret) {
+ cpdma_desc_free(ctlr->pool, desc, 1);
+ return -EINVAL;
+ }
+ }
+
+ /* Relaxed IO accessors can be used here as there is a read barrier
+ * at the end of the write sequence.
+ */
+ writel_relaxed(0, &desc->hw_next);
+ writel_relaxed(buffer, &desc->hw_buffer);
+ writel_relaxed(len, &desc->hw_len);
+ writel_relaxed(mode | len, &desc->hw_mode);
+ writel_relaxed((uintptr_t)si->token, &desc->sw_token);
+ writel_relaxed(buffer, &desc->sw_buffer);
+ writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
+ &desc->sw_len);
+ desc_read(desc, sw_len);
+
+ __cpdma_chan_submit(chan, desc);
+
+ if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
+ chan_write(chan, rxfree, 1);
+
+ chan->count++;
+ return 0;
+}
+
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data_virt = data;
+ si.data_dma = 0;
+ si.len = len;
+ si.directed = directed;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data_virt = NULL;
+ si.data_dma = data;
+ si.len = len;
+ si.directed = directed;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data_virt = data;
+ si.data_dma = 0;
+ si.len = len;
+ si.directed = directed;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed)
+{
+ struct submit_info si;
+ unsigned long flags;
+ int ret;
+
+ si.chan = chan;
+ si.token = token;
+ si.data_virt = NULL;
+ si.data_dma = data;
+ si.len = len;
+ si.directed = directed;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ ret = cpdma_chan_submit_si(&si);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return ret;
+}
+
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ bool free_tx_desc;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ free_tx_desc = (chan->count < chan->desc_num) &&
+ gen_pool_avail(pool->gen_pool);
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return free_tx_desc;
+}
+
+static void __cpdma_chan_free(struct cpdma_chan *chan,
+ struct cpdma_desc __iomem *desc,
+ int outlen, int status)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t buff_dma;
+ int origlen;
+ uintptr_t token;
+
+ token = desc_read(desc, sw_token);
+ origlen = desc_read(desc, sw_len);
+
+ buff_dma = desc_read(desc, sw_buffer);
+ if (origlen & CPDMA_DMA_EXT_MAP) {
+ origlen &= ~CPDMA_DMA_EXT_MAP;
+ dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
+ chan->dir);
+ } else {
+ dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
+ }
+
+ cpdma_desc_free(pool, desc, 1);
+ (*chan->handler)((void *)token, outlen, status);
+}
+
+static int __cpdma_chan_process(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc __iomem *desc;
+ int status, outlen;
+ int cb_status = 0;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ dma_addr_t desc_dma;
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+
+ desc = chan->head;
+ if (!desc) {
+ chan->stats.empty_dequeue++;
+ status = -ENOENT;
+ goto unlock_ret;
+ }
+ desc_dma = desc_phys(pool, desc);
+
+ status = desc_read(desc, hw_mode);
+ outlen = status & 0x7ff;
+ if (status & CPDMA_DESC_OWNER) {
+ chan->stats.busy_dequeue++;
+ status = -EBUSY;
+ goto unlock_ret;
+ }
+
+ if (status & CPDMA_DESC_PASS_CRC)
+ outlen -= CPDMA_DESC_CRC_LEN;
+
+ status = status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
+ CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);
+
+ chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
+ chan_write(chan, cp, desc_dma);
+ chan->count--;
+ chan->stats.good_dequeue++;
+
+ if ((status & CPDMA_DESC_EOQ) && chan->head) {
+ chan->stats.requeue++;
+ chan_write(chan, hdp, desc_phys(pool, chan->head));
+ }
+
+ spin_unlock_irqrestore(&chan->lock, flags);
+ if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
+ cb_status = -ENOSYS;
+ else
+ cb_status = status;
+
+ __cpdma_chan_free(chan, desc, outlen, cb_status);
+ return status;
+
+unlock_ret:
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return status;
+}
+
+int cpdma_chan_process(struct cpdma_chan *chan, int quota)
+{
+ int used = 0, ret = 0;
+
+ if (chan->state != CPDMA_STATE_ACTIVE)
+ return -EINVAL;
+
+ while (used < quota) {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ used++;
+ }
+ return used;
+}
+
+int cpdma_chan_start(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = cpdma_chan_set_chan_shaper(chan);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ if (ret)
+ return ret;
+
+ ret = cpdma_chan_on(chan);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+int cpdma_chan_stop(struct cpdma_chan *chan)
+{
+ struct cpdma_ctlr *ctlr = chan->ctlr;
+ struct cpdma_desc_pool *pool = ctlr->pool;
+ unsigned long flags;
+ int ret;
+ unsigned timeout;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ chan->state = CPDMA_STATE_TEARDOWN;
+ dma_reg_write(ctlr, chan->int_clear, chan->mask);
+
+ /* trigger teardown */
+ dma_reg_write(ctlr, chan->td, chan_linear(chan));
+
+ /* wait for teardown complete */
+ timeout = 100 * 100; /* 100 ms */
+ while (timeout) {
+ u32 cp = chan_read(chan, cp);
+ if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
+ break;
+ udelay(10);
+ timeout--;
+ }
+ WARN_ON(!timeout);
+ chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
+
+ /* handle completed packets */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ do {
+ ret = __cpdma_chan_process(chan);
+ if (ret < 0)
+ break;
+ } while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
+ spin_lock_irqsave(&chan->lock, flags);
+
+ /* remaining packets haven't been tx/rx'ed, clean them up */
+ while (chan->head) {
+ struct cpdma_desc __iomem *desc = chan->head;
+ dma_addr_t next_dma;
+
+ next_dma = desc_read(desc, hw_next);
+ chan->head = desc_from_phys(pool, next_dma);
+ chan->count--;
+ chan->stats.teardown_dequeue++;
+
+ /* issue callback without locks held */
+ spin_unlock_irqrestore(&chan->lock, flags);
+ __cpdma_chan_free(chan, desc, 0, -ENOSYS);
+ spin_lock_irqsave(&chan->lock, flags);
+ }
+
+ chan->state = CPDMA_STATE_IDLE;
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return 0;
+}
+
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->lock, flags);
+ if (chan->state != CPDMA_STATE_ACTIVE) {
+ spin_unlock_irqrestore(&chan->lock, flags);
+ return -EINVAL;
+ }
+
+ dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
+ chan->mask);
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ return 0;
+}
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = _cpdma_control_get(ctlr, control);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return ret;
+}
+
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+ ret = _cpdma_control_set(ctlr, control, value);
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return ret;
+}
+
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->num_rx_desc;
+}
+
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
+{
+ return ctlr->num_tx_desc;
+}
+
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
+{
+ unsigned long flags;
+ int temp, ret;
+
+ spin_lock_irqsave(&ctlr->lock, flags);
+
+ temp = ctlr->num_rx_desc;
+ ctlr->num_rx_desc = num_rx_desc;
+ ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ ret = cpdma_chan_split_pool(ctlr);
+ if (ret) {
+ ctlr->num_rx_desc = temp;
+ ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
+ }
+
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+
+ return ret;
+}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
new file mode 100644
index 000000000..d3cfe234d
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Texas Instruments CPDMA Driver
+ *
+ * Copyright (C) 2010 Texas Instruments
+ *
+ */
+#ifndef __DAVINCI_CPDMA_H__
+#define __DAVINCI_CPDMA_H__
+
+#define CPDMA_MAX_CHANNELS BITS_PER_LONG
+
+#define CPDMA_RX_SOURCE_PORT(__status__) ((__status__ >> 16) & 0x7)
+
+#define CPDMA_RX_VLAN_ENCAP BIT(19)
+
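+
+/* End-of-interrupt acknowledge codes written to the MACEOIVECTOR register
+ * via cpdma_ctlr_eoi().
+ */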
+#define CPDMA_EOI_RX_THRESH 0x0
+#define CPDMA_EOI_RX 0x1
+#define CPDMA_EOI_TX 0x2
+#define CPDMA_EOI_MISC 0x3
+
+struct cpdma_params {
+ struct device *dev;
+ void __iomem *dmaregs;
+ void __iomem *txhdp, *rxhdp, *txcp, *rxcp;
+ void __iomem *rxthresh, *rxfree;
+ int num_chan;
+ bool has_soft_reset;
+ int min_packet_size;
+ dma_addr_t desc_mem_phys;
+ dma_addr_t desc_hw_addr;
+ int desc_mem_size;
+ int desc_align;
+ u32 bus_freq_mhz;
+ u32 descs_pool_size;
+
+ /*
+ * Some instances of embedded cpdma controllers have extra control and
+ * status registers. The following flag enables access to these
+ * "extended" registers.
+ */
+ bool has_ext_regs;
+};
+
+struct cpdma_chan_stats {
+ u32 head_enqueue;
+ u32 tail_enqueue;
+ u32 pad_enqueue;
+ u32 misqueued;
+ u32 desc_alloc_fail;
+ u32 pad_alloc_fail;
+ u32 runt_receive_buff;
+ u32 runt_transmit_buff;
+ u32 empty_dequeue;
+ u32 busy_dequeue;
+ u32 good_dequeue;
+ u32 requeue;
+ u32 teardown_dequeue;
+};
+
+struct cpdma_ctlr;
+struct cpdma_chan;
+
+typedef void (*cpdma_handler_fn)(void *token, int len, int status);
+
+struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params);
+int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
+int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
+
+struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
+ cpdma_handler_fn handler, int rx_type);
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
+int cpdma_chan_destroy(struct cpdma_chan *chan);
+int cpdma_chan_start(struct cpdma_chan *chan);
+int cpdma_chan_stop(struct cpdma_chan *chan);
+
+int cpdma_chan_get_stats(struct cpdma_chan *chan,
+ struct cpdma_chan_stats *stats);
+int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed);
+int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed);
+int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
+ dma_addr_t data, int len, int directed);
+int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
+ int len, int directed);
+int cpdma_chan_process(struct cpdma_chan *chan, int quota);
+
+int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
+void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
+int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
+bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
+int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight);
+int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate);
+u32 cpdma_chan_get_rate(struct cpdma_chan *ch);
+u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr);
+
+enum cpdma_control {
+ CPDMA_TX_RLIM, /* read-write */
+ CPDMA_CMD_IDLE, /* write-only */
+ CPDMA_COPY_ERROR_FRAMES, /* read-write */
+ CPDMA_RX_OFF_LEN_UPDATE, /* read-write */
+ CPDMA_RX_OWNERSHIP_FLIP, /* read-write */
+ CPDMA_TX_PRIO_FIXED, /* read-write */
+ CPDMA_STAT_IDLE, /* read-only */
+ CPDMA_STAT_TX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_TX_ERR_CODE, /* read-only */
+ CPDMA_STAT_RX_ERR_CHAN, /* read-only */
+ CPDMA_STAT_RX_ERR_CODE, /* read-only */
+ CPDMA_RX_BUFFER_OFFSET, /* read-write */
+};
+
+int cpdma_control_get(struct cpdma_ctlr *ctlr, int control);
+int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value);
+int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr);
+int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc);
+int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr);
+
+#endif
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
new file mode 100644
index 000000000..2eb9d5a32
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -0,0 +1,2109 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DaVinci Ethernet Medium Access Controller
+ *
+ * DaVinci EMAC is based upon CPPI 3.0 TI DMA engine
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ * ---------------------------------------------------------------------------
+ * History:
+ * 0-5 A number of folks worked on this driver in bits and pieces but the major
+ * contribution came from Suraj Iyer and Anant Gole
+ * 6.0 Anant Gole - rewrote the driver as per Linux conventions
+ * 6.1 Chaithrika U S - added support for Gigabit and RMII features,
+ * PHY layer usage
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/proc_fs.h>
+#include <linux/ctype.h>
+#include <linux/spinlock.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/semaphore.h>
+#include <linux/phy.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
+#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+
+#include <asm/irq.h>
+#include <asm/page.h>
+
+#include "cpsw.h"
+#include "davinci_cpdma.h"
+
+static int debug_level;
+module_param(debug_level, int, 0);
+MODULE_PARM_DESC(debug_level, "DaVinci EMAC debug level (NETIF_MSG bits)");
+
+/* Netif debug messages possible */
+#define DAVINCI_EMAC_DEBUG (NETIF_MSG_DRV | \
+ NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK | \
+ NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | \
+ NETIF_MSG_IFUP | \
+ NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | \
+ NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_INTR | \
+ NETIF_MSG_TX_DONE | \
+ NETIF_MSG_RX_STATUS | \
+ NETIF_MSG_PKTDATA | \
+ NETIF_MSG_HW | \
+ NETIF_MSG_WOL)
+
+/* version info */
+#define EMAC_MAJOR_VERSION 6
+#define EMAC_MINOR_VERSION 1
+#define EMAC_MODULE_VERSION "6.1"
+MODULE_VERSION(EMAC_MODULE_VERSION);
+static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
+
+/* Configuration items */
+#define EMAC_DEF_PASS_CRC (0) /* Do not pass CRC up to frames */
+#define EMAC_DEF_QOS_EN (0) /* EMAC proprietary QoS disabled */
+#define EMAC_DEF_NO_BUFF_CHAIN (0) /* No buffer chain */
+#define EMAC_DEF_MACCTRL_FRAME_EN (0) /* Discard Maccontrol frames */
+#define EMAC_DEF_SHORT_FRAME_EN (0) /* Discard short frames */
+#define EMAC_DEF_ERROR_FRAME_EN (0) /* Discard error frames */
+#define EMAC_DEF_PROM_EN (0) /* Promiscuous disabled */
+#define EMAC_DEF_PROM_CH (0) /* Promiscuous channel is 0 */
+#define EMAC_DEF_BCAST_EN (1) /* Broadcast enabled */
+#define EMAC_DEF_BCAST_CH (0) /* Broadcast channel is 0 */
+#define EMAC_DEF_MCAST_EN (1) /* Multicast enabled */
+#define EMAC_DEF_MCAST_CH (0) /* Multicast channel is 0 */
+
+#define EMAC_DEF_TXPRIO_FIXED (1) /* TX Priority is fixed */
+#define EMAC_DEF_TXPACING_EN (0) /* TX pacing NOT supported*/
+
+#define EMAC_DEF_BUFFER_OFFSET (0) /* Buffer offset to DMA (future) */
+#define EMAC_DEF_MIN_ETHPKTSIZE (60) /* Minimum ethernet pkt size */
+#define EMAC_DEF_MAX_FRAME_SIZE (1500 + 14 + 4 + 4)
+#define EMAC_DEF_TX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_CH (0) /* Default 0th channel */
+#define EMAC_DEF_RX_NUM_DESC (128)
+#define EMAC_DEF_MAX_TX_CH (1) /* Max TX channels configured */
+#define EMAC_DEF_MAX_RX_CH (1) /* Max RX channels configured */
+
+/* Buffer descriptor parameters */
+#define EMAC_DEF_TX_MAX_SERVICE (32) /* TX max service BD's */
+#define EMAC_DEF_RX_MAX_SERVICE (64) /* should = netdev->weight */
+
+/* EMAC register related defines */
+#define EMAC_ALL_MULTI_REG_VALUE (0xFFFFFFFF)
+#define EMAC_NUM_MULTICAST_BITS (64)
+#define EMAC_TX_CONTROL_TX_ENABLE_VAL (0x1)
+#define EMAC_RX_CONTROL_RX_ENABLE_VAL (0x1)
+#define EMAC_MAC_HOST_ERR_INTMASK_VAL (0x2)
+#define EMAC_RX_UNICAST_CLEAR_ALL (0xFF)
+#define EMAC_INT_MASK_CLEAR (0xFF)
+
+/* RX MBP register bit positions */
+#define EMAC_RXMBP_PASSCRC_MASK BIT(30)
+#define EMAC_RXMBP_QOSEN_MASK BIT(29)
+#define EMAC_RXMBP_NOCHAIN_MASK BIT(28)
+#define EMAC_RXMBP_CMFEN_MASK BIT(24)
+#define EMAC_RXMBP_CSFEN_MASK BIT(23)
+#define EMAC_RXMBP_CEFEN_MASK BIT(22)
+#define EMAC_RXMBP_CAFEN_MASK BIT(21)
+#define EMAC_RXMBP_PROMCH_SHIFT (16)
+#define EMAC_RXMBP_PROMCH_MASK (0x7 << 16)
+#define EMAC_RXMBP_BROADEN_MASK BIT(13)
+#define EMAC_RXMBP_BROADCH_SHIFT (8)
+#define EMAC_RXMBP_BROADCH_MASK (0x7 << 8)
+#define EMAC_RXMBP_MULTIEN_MASK BIT(5)
+#define EMAC_RXMBP_MULTICH_SHIFT (0)
+#define EMAC_RXMBP_MULTICH_MASK (0x7)
+#define EMAC_RXMBP_CHMASK (0x7)
+
+/* EMAC register definitions/bit maps used */
+# define EMAC_MBP_RXPROMISC (0x00200000)
+# define EMAC_MBP_PROMISCCH(ch) (((ch) & 0x7) << 16)
+# define EMAC_MBP_RXBCAST (0x00002000)
+# define EMAC_MBP_BCASTCHAN(ch) (((ch) & 0x7) << 8)
+# define EMAC_MBP_RXMCAST (0x00000020)
+# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
+
+/* EMAC mac_control register */
+#define EMAC_MACCONTROL_TXPTYPE BIT(9)
+#define EMAC_MACCONTROL_TXPACEEN BIT(6)
+#define EMAC_MACCONTROL_GMIIEN BIT(5)
+#define EMAC_MACCONTROL_GIGABITEN BIT(7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
+#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
+
+/* GIGABIT MODE related bits */
+#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
+#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
+
+/* EMAC mac_status register */
+#define EMAC_MACSTATUS_TXERRCODE_MASK (0xF00000)
+#define EMAC_MACSTATUS_TXERRCODE_SHIFT (20)
+#define EMAC_MACSTATUS_TXERRCH_MASK (0x70000)
+#define EMAC_MACSTATUS_TXERRCH_SHIFT (16)
+#define EMAC_MACSTATUS_RXERRCODE_MASK (0xF000)
+#define EMAC_MACSTATUS_RXERRCODE_SHIFT (12)
+#define EMAC_MACSTATUS_RXERRCH_MASK (0x700)
+#define EMAC_MACSTATUS_RXERRCH_SHIFT (8)
+
+/* EMAC RX register masks */
+#define EMAC_RX_MAX_LEN_MASK (0xFFFF)
+#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
+
+/* MAC_IN_VECTOR (0x180) register bit fields */
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)
+
+/* NOTE: For DM646x the IN_VECTOR has changed */
+#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC BIT(16 + EMAC_DEF_TX_CH)
+#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
+#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
+
+/* CPPI bit positions */
+#define EMAC_CPPI_SOP_BIT BIT(31)
+#define EMAC_CPPI_EOP_BIT BIT(30)
+#define EMAC_CPPI_OWNERSHIP_BIT BIT(29)
+#define EMAC_CPPI_EOQ_BIT BIT(28)
+#define EMAC_CPPI_TEARDOWN_COMPLETE_BIT BIT(27)
+#define EMAC_CPPI_PASS_CRC_BIT BIT(26)
+#define EMAC_RX_BD_BUF_SIZE (0xFFFF)
+#define EMAC_BD_LENGTH_FOR_CACHE (16) /* only CPPI bytes */
+#define EMAC_RX_BD_PKT_LENGTH_MASK (0xFFFF)
+
+/* Max hardware defines */
+#define EMAC_MAX_TXRX_CHANNELS (8) /* Max hardware channels */
+#define EMAC_DEF_MAX_MULTICAST_ADDRESSES (64) /* Max mcast addr's */
+
+/* EMAC Peripheral Device Register Memory Layout structure */
+#define EMAC_MACINVECTOR 0x90
+
+#define EMAC_DM646X_MACEOIVECTOR 0x94
+
+#define EMAC_MACINTSTATRAW 0xB0
+#define EMAC_MACINTSTATMASKED 0xB4
+#define EMAC_MACINTMASKSET 0xB8
+#define EMAC_MACINTMASKCLEAR 0xBC
+
+#define EMAC_RXMBPENABLE 0x100
+#define EMAC_RXUNICASTSET 0x104
+#define EMAC_RXUNICASTCLEAR 0x108
+#define EMAC_RXMAXLEN 0x10C
+#define EMAC_RXBUFFEROFFSET 0x110
+#define EMAC_RXFILTERLOWTHRESH 0x114
+
+#define EMAC_MACCONTROL 0x160
+#define EMAC_MACSTATUS 0x164
+#define EMAC_EMCONTROL 0x168
+#define EMAC_FIFOCONTROL 0x16C
+#define EMAC_MACCONFIG 0x170
+#define EMAC_SOFTRESET 0x174
+#define EMAC_MACSRCADDRLO 0x1D0
+#define EMAC_MACSRCADDRHI 0x1D4
+#define EMAC_MACHASH1 0x1D8
+#define EMAC_MACHASH2 0x1DC
+#define EMAC_MACADDRLO 0x500
+#define EMAC_MACADDRHI 0x504
+#define EMAC_MACINDEX 0x508
+
+/* EMAC statistics registers */
+#define EMAC_RXGOODFRAMES 0x200
+#define EMAC_RXBCASTFRAMES 0x204
+#define EMAC_RXMCASTFRAMES 0x208
+#define EMAC_RXPAUSEFRAMES 0x20C
+#define EMAC_RXCRCERRORS 0x210
+#define EMAC_RXALIGNCODEERRORS 0x214
+#define EMAC_RXOVERSIZED 0x218
+#define EMAC_RXJABBER 0x21C
+#define EMAC_RXUNDERSIZED 0x220
+#define EMAC_RXFRAGMENTS 0x224
+#define EMAC_RXFILTERED 0x228
+#define EMAC_RXQOSFILTERED 0x22C
+#define EMAC_RXOCTETS 0x230
+#define EMAC_TXGOODFRAMES 0x234
+#define EMAC_TXBCASTFRAMES 0x238
+#define EMAC_TXMCASTFRAMES 0x23C
+#define EMAC_TXPAUSEFRAMES 0x240
+#define EMAC_TXDEFERRED 0x244
+#define EMAC_TXCOLLISION 0x248
+#define EMAC_TXSINGLECOLL 0x24C
+#define EMAC_TXMULTICOLL 0x250
+#define EMAC_TXEXCESSIVECOLL 0x254
+#define EMAC_TXLATECOLL 0x258
+#define EMAC_TXUNDERRUN 0x25C
+#define EMAC_TXCARRIERSENSE 0x260
+#define EMAC_TXOCTETS 0x264
+#define EMAC_NETOCTETS 0x280
+#define EMAC_RXSOFOVERRUNS 0x284
+#define EMAC_RXMOFOVERRUNS 0x288
+#define EMAC_RXDMAOVERRUNS 0x28C
+
+/* EMAC DM644x control registers */
+#define EMAC_CTRL_EWCTL (0x4)
+#define EMAC_CTRL_EWINTTCNT (0x8)
+
+/* EMAC DM644x control module masks */
+#define EMAC_DM644X_EWINTCNT_MASK 0x1FFFF
+#define EMAC_DM644X_INTMIN_INTVL 0x1
+#define EMAC_DM644X_INTMAX_INTVL (EMAC_DM644X_EWINTCNT_MASK)
+
+/* EMAC DM646X control module registers */
+#define EMAC_DM646X_CMINTCTRL 0x0C
+#define EMAC_DM646X_CMRXINTEN 0x14
+#define EMAC_DM646X_CMTXINTEN 0x18
+#define EMAC_DM646X_CMRXINTMAX 0x70
+#define EMAC_DM646X_CMTXINTMAX 0x74
+
+/* EMAC DM646X control module masks */
+#define EMAC_DM646X_INTPACEEN (0x3 << 16)
+#define EMAC_DM646X_INTPRESCALE_MASK (0x7FF << 0)
+#define EMAC_DM646X_CMINTMAX_CNT 63
+#define EMAC_DM646X_CMINTMIN_CNT 2
+#define EMAC_DM646X_CMINTMAX_INTVL (1000 / EMAC_DM646X_CMINTMIN_CNT)
+#define EMAC_DM646X_CMINTMIN_INTVL ((1000 / EMAC_DM646X_CMINTMAX_CNT) + 1)
+
+
+/* EMAC EOI codes for C0 */
+#define EMAC_DM646X_MAC_EOI_C0_RXEN (0x01)
+#define EMAC_DM646X_MAC_EOI_C0_TXEN (0x02)
+
+/* EMAC Stats Clear Mask */
+#define EMAC_STATS_CLR_MASK (0xFFFFFFFF)
+
+/* emac_priv: EMAC private data structure
+ *
+ * EMAC adapter private data structure
+ */
+struct emac_priv {
+ u32 msg_enable;
+ struct net_device *ndev;
+ struct platform_device *pdev;
+ struct napi_struct napi;
+ char mac_addr[6];
+ void __iomem *remap_addr;
+ u32 emac_base_phys;
+ void __iomem *emac_base;
+ void __iomem *ctrl_base;
+ struct cpdma_ctlr *dma;
+ struct cpdma_chan *txchan;
+ struct cpdma_chan *rxchan;
+ u32 link; /* 1=link on, 0=link off */
+ u32 speed; /* 0=Auto Neg, 1=No PHY, 10,100, 1000 - mbps */
+ u32 duplex; /* Link duplex: 0=Half, 1=Full */
+ u32 rx_buf_size;
+ u32 isr_count;
+ u32 coal_intvl;
+ u32 bus_freq_mhz;
+ u8 rmii_en;
+ u8 version;
+ u32 mac_hash1;
+ u32 mac_hash2;
+ u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
+ u32 rx_addr_type;
+ const char *phy_id;
+ struct device_node *phy_node;
+ spinlock_t lock;
+ /*platform specific members*/
+ void (*int_enable) (void);
+ void (*int_disable) (void);
+};
+
+/* EMAC TX Host Error description strings */
+static char *emac_txhost_errcodes[16] = {
+ "No error", "SOP error", "Ownership bit not set in SOP buffer",
+ "Zero Next Buffer Descriptor Pointer Without EOP",
+ "Zero Buffer Pointer", "Zero Buffer Length", "Packet Length Error",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* EMAC RX Host Error description strings */
+static char *emac_rxhost_errcodes[16] = {
+ "No error", "Reserved", "Ownership bit not set in input buffer",
+ "Reserved", "Zero Buffer Pointer", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved", "Reserved",
+ "Reserved", "Reserved", "Reserved", "Reserved"
+};
+
+/* Helper macros */
+#define emac_read(reg) ioread32(priv->emac_base + (reg))
+#define emac_write(reg, val) iowrite32(val, priv->emac_base + (reg))
+
+#define emac_ctrl_read(reg) ioread32((priv->ctrl_base + (reg)))
+#define emac_ctrl_write(reg, val) iowrite32(val, (priv->ctrl_base + (reg)))
+
+/**
+ * emac_get_drvinfo - Get EMAC driver information
+ * @ndev: The DaVinci EMAC network adapter
+ * @info: ethtool info structure containing name and version
+ *
+ * Returns EMAC driver information (name and version)
+ *
+ */
+static void emac_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strscpy(info->driver, emac_version_string, sizeof(info->driver));
+ strscpy(info->version, EMAC_MODULE_VERSION, sizeof(info->version));
+}
+
+/**
+ * emac_get_coalesce - Get interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
+ *
+ * Fetch the current interrupt coalesce settings
+ *
+ */
+static int emac_get_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ coal->rx_coalesce_usecs = priv->coal_intvl;
+ return 0;
+
+}
+
+/**
+ * emac_set_coalesce - Set interrupt coalesce settings for this device
+ * @ndev : The DaVinci EMAC network adapter
+ * @coal : ethtool coalesce settings structure
+ * @kernel_coal: ethtool CQE mode setting structure
+ * @extack: extack for reporting error messages
+ *
+ * Set interrupt coalesce parameters
+ *
+ */
+static int emac_set_coalesce(struct net_device *ndev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 int_ctrl, num_interrupts = 0;
+ u32 prescale = 0, addnl_dvdr = 1, coal_intvl = 0;
+
+ if (!coal->rx_coalesce_usecs) {
+ priv->coal_intvl = 0;
+
+ switch (priv->version) {
+ case EMAC_VERSION_2:
+ emac_ctrl_write(EMAC_DM646X_CMINTCTRL, 0);
+ break;
+ default:
+ emac_ctrl_write(EMAC_CTRL_EWINTTCNT, 0);
+ break;
+ }
+
+ return 0;
+ }
+
+ coal_intvl = coal->rx_coalesce_usecs;
+
+ switch (priv->version) {
+ case EMAC_VERSION_2:
+ int_ctrl = emac_ctrl_read(EMAC_DM646X_CMINTCTRL);
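+ /* prescale = number of bus clocks in one 4us interrupt pacer pulse */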
+ prescale = priv->bus_freq_mhz * 4;
+
+ if (coal_intvl < EMAC_DM646X_CMINTMIN_INTVL)
+ coal_intvl = EMAC_DM646X_CMINTMIN_INTVL;
+
+ if (coal_intvl > EMAC_DM646X_CMINTMAX_INTVL) {
+ /*
+ * Interrupt pacer works with 4us Pulse, we can
+ * throttle further by dilating the 4us pulse.
+ */
+ addnl_dvdr = EMAC_DM646X_INTPRESCALE_MASK / prescale;
+
+ if (addnl_dvdr > 1) {
+ prescale *= addnl_dvdr;
+ if (coal_intvl > (EMAC_DM646X_CMINTMAX_INTVL
+ * addnl_dvdr))
+ coal_intvl = (EMAC_DM646X_CMINTMAX_INTVL
+ * addnl_dvdr);
+ } else {
+ addnl_dvdr = 1;
+ coal_intvl = EMAC_DM646X_CMINTMAX_INTVL;
+ }
+ }
+
+ num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
+
+ int_ctrl |= EMAC_DM646X_INTPACEEN;
+ int_ctrl &= (~EMAC_DM646X_INTPRESCALE_MASK);
+ int_ctrl |= (prescale & EMAC_DM646X_INTPRESCALE_MASK);
+ emac_ctrl_write(EMAC_DM646X_CMINTCTRL, int_ctrl);
+
+ emac_ctrl_write(EMAC_DM646X_CMRXINTMAX, num_interrupts);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTMAX, num_interrupts);
+
+ break;
+ default:
+ int_ctrl = emac_ctrl_read(EMAC_CTRL_EWINTTCNT);
+ int_ctrl &= (~EMAC_DM644X_EWINTCNT_MASK);
+ prescale = coal_intvl * priv->bus_freq_mhz;
+ if (prescale > EMAC_DM644X_EWINTCNT_MASK) {
+ prescale = EMAC_DM644X_EWINTCNT_MASK;
+ coal_intvl = prescale / priv->bus_freq_mhz;
+ }
+ emac_ctrl_write(EMAC_CTRL_EWINTTCNT, (int_ctrl | prescale));
+
+ break;
+ }
+
+ printk(KERN_INFO "Set coalesce to %d usecs.\n", coal_intvl);
+ priv->coal_intvl = coal_intvl;
+
+ return 0;
+
+}
+
+
+/* ethtool_ops: DaVinci EMAC Ethtool structure
+ *
+ * Ethtool support for EMAC adapter
+ */
+static const struct ethtool_ops ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
+ .get_drvinfo = emac_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = emac_get_coalesce,
+ .set_coalesce = emac_set_coalesce,
+ .get_ts_info = ethtool_op_get_ts_info,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
+};
+
+/**
+ * emac_update_phystatus - Update Phy status
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Updates phy status and takes action for network queue if required
+ * based upon link status
+ *
+ */
+static void emac_update_phystatus(struct emac_priv *priv)
+{
+ u32 mac_control;
+ u32 new_duplex;
+ u32 cur_duplex;
+ struct net_device *ndev = priv->ndev;
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+ cur_duplex = (mac_control & EMAC_MACCONTROL_FULLDUPLEXEN) ?
+ DUPLEX_FULL : DUPLEX_HALF;
+ if (ndev->phydev)
+ new_duplex = ndev->phydev->duplex;
+ else
+ new_duplex = DUPLEX_FULL;
+
+ /* We get called only if link has changed (speed/duplex/status) */
+ if ((priv->link) && (new_duplex != cur_duplex)) {
+ priv->duplex = new_duplex;
+ if (DUPLEX_FULL == priv->duplex)
+ mac_control |= (EMAC_MACCONTROL_FULLDUPLEXEN);
+ else
+ mac_control &= ~(EMAC_MACCONTROL_FULLDUPLEXEN);
+ }
+
+ if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
+ mac_control = emac_read(EMAC_MACCONTROL);
+ mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
+ EMAC_DM646X_MACCONTORL_GIGFORCE);
+ } else {
+ /* Clear the GIG bit and GIGFORCE bit */
+ mac_control &= ~(EMAC_DM646X_MACCONTORL_GIGFORCE |
+ EMAC_DM646X_MACCONTORL_GIG);
+
+ if (priv->rmii_en && (priv->speed == SPEED_100))
+ mac_control |= EMAC_MACCONTROL_RMIISPEED_MASK;
+ else
+ mac_control &= ~EMAC_MACCONTROL_RMIISPEED_MASK;
+ }
+
+ /* Update mac_control if changed */
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ if (priv->link) {
+ /* link ON */
+ if (!netif_carrier_ok(ndev))
+ netif_carrier_on(ndev);
+ /* reactivate the transmit queue if it is stopped */
+ if (netif_running(ndev) && netif_queue_stopped(ndev))
+ netif_wake_queue(ndev);
+ } else {
+ /* link OFF */
+ if (netif_carrier_ok(ndev))
+ netif_carrier_off(ndev);
+ if (!netif_queue_stopped(ndev))
+ netif_stop_queue(ndev);
+ }
+}
+
+/**
+ * hash_get - Calculate hash value from mac address
+ * @addr: mac address to delete from hash table
+ *
+ * Calculates hash value from mac address
+ *
+ */
+static u32 hash_get(u8 *addr)
+{
+ u32 hash;
+ u8 tmpval;
+ int cnt;
+ hash = 0;
+
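+
+ /* XOR-fold all six bytes of the MAC address down to a 6-bit index */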
+ for (cnt = 0; cnt < 2; cnt++) {
+ tmpval = *addr++;
+ hash ^= (tmpval >> 2) ^ (tmpval << 4);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 4) ^ (tmpval << 2);
+ tmpval = *addr++;
+ hash ^= (tmpval >> 6) ^ (tmpval);
+ }
+
+ return hash & 0x3F;
+}
+
+/**
+ * emac_hash_add - Hash function to add a mac addr to the hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to add to the hash table
+ *
+ * Adds mac address to the internal hash table
+ *
+ */
+static int emac_hash_add(struct emac_priv *priv, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ u32 rc = 0;
+ u32 hash_bit;
+ u32 hash_value = hash_get(mac_addr);
+
+ if (hash_value >= EMAC_NUM_MULTICAST_BITS) {
+ if (netif_msg_drv(priv)) {
+ dev_err(emac_dev, "DaVinci EMAC: emac_hash_add(): Invalid "\
+ "Hash %08x, should not be greater than %08x",
+ hash_value, (EMAC_NUM_MULTICAST_BITS - 1));
+ }
+ return -1;
+ }
+
+ /* set the hash bit only if not previously set */
+ if (priv->multicast_hash_cnt[hash_value] == 0) {
+ rc = 1; /* hash value changed */
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 |= hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 |= hash_bit;
+ }
+ }
+
+ /* incr counter for num of mcast addr's mapped to "this" hash bit */
+ ++priv->multicast_hash_cnt[hash_value];
+
+ return rc;
+}
+
+/**
+ * emac_hash_del - Hash function to delete mac addr from hash table
+ * @priv: The DaVinci EMAC private adapter structure
+ * @mac_addr: mac address to delete from hash table
+ *
+ * Removes mac address from the internal hash table
+ *
+ */
+static int emac_hash_del(struct emac_priv *priv, u8 *mac_addr)
+{
+ u32 hash_value;
+ u32 hash_bit;
+
+ hash_value = hash_get(mac_addr);
+ if (priv->multicast_hash_cnt[hash_value] > 0) {
+ /* dec cntr for num of mcast addr's mapped to this hash bit */
+ --priv->multicast_hash_cnt[hash_value];
+ }
+
+ /* if counter still > 0, at least one multicast address refers
+ * to this hash bit. so return 0 */
+ if (priv->multicast_hash_cnt[hash_value] > 0)
+ return 0;
+
+ if (hash_value < 32) {
+ hash_bit = BIT(hash_value);
+ priv->mac_hash1 &= ~hash_bit;
+ } else {
+ hash_bit = BIT((hash_value - 32));
+ priv->mac_hash2 &= ~hash_bit;
+ }
+
+ /* return 1 to indicate change in mac_hash registers reqd */
+ return 1;
+}
+
+/* EMAC multicast operation */
+#define EMAC_MULTICAST_ADD 0
+#define EMAC_MULTICAST_DEL 1
+#define EMAC_ALL_MULTI_SET 2
+#define EMAC_ALL_MULTI_CLR 3
+
+/**
+ * emac_add_mcast - Set multicast address in the EMAC adapter (Internal)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @action: multicast operation to perform
+ * @mac_addr: mac address to set
+ *
+ * Set multicast addresses in EMAC adapter - internal function
+ *
+ */
+static void emac_add_mcast(struct emac_priv *priv, u32 action, u8 *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+ int update = -1;
+
+ switch (action) {
+ case EMAC_MULTICAST_ADD:
+ update = emac_hash_add(priv, mac_addr);
+ break;
+ case EMAC_MULTICAST_DEL:
+ update = emac_hash_del(priv, mac_addr);
+ break;
+ case EMAC_ALL_MULTI_SET:
+ update = 1;
+ priv->mac_hash1 = EMAC_ALL_MULTI_REG_VALUE;
+ priv->mac_hash2 = EMAC_ALL_MULTI_REG_VALUE;
+ break;
+ case EMAC_ALL_MULTI_CLR:
+ update = 1;
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ memset(&(priv->multicast_hash_cnt[0]), 0,
+ sizeof(priv->multicast_hash_cnt[0]) *
+ EMAC_NUM_MULTICAST_BITS);
+ break;
+ default:
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: add_mcast"\
+ ": bad operation %d", action);
+ break;
+ }
+
+ /* write to the hardware only if the register status changes */
+ if (update > 0) {
+ emac_write(EMAC_MACHASH1, priv->mac_hash1);
+ emac_write(EMAC_MACHASH2, priv->mac_hash2);
+ }
+}
+
+/**
+ * emac_dev_mcast_set - Set multicast address in the EMAC adapter
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Set multicast addresses in EMAC adapter
+ *
+ */
+static void emac_dev_mcast_set(struct net_device *ndev)
+{
+ u32 mbp_enable;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ mbp_enable = emac_read(EMAC_RXMBPENABLE);
+ if (ndev->flags & IFF_PROMISC) {
+ mbp_enable &= (~EMAC_MBP_PROMISCCH(EMAC_DEF_PROM_CH));
+ mbp_enable |= (EMAC_MBP_RXPROMISC);
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXPROMISC);
+ if ((ndev->flags & IFF_ALLMULTI) ||
+ netdev_mc_count(ndev) > EMAC_DEF_MAX_MULTICAST_ADDRESSES) {
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_SET, NULL);
+ } else if (!netdev_mc_empty(ndev)) {
+ struct netdev_hw_addr *ha;
+
+ mbp_enable = (mbp_enable | EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ /* program multicast address list into EMAC hardware */
+ netdev_for_each_mc_addr(ha, ndev) {
+ emac_add_mcast(priv, EMAC_MULTICAST_ADD,
+ (u8 *) ha->addr);
+ }
+ } else {
+ mbp_enable = (mbp_enable & ~EMAC_MBP_RXMCAST);
+ emac_add_mcast(priv, EMAC_ALL_MULTI_CLR, NULL);
+ }
+ }
+ /* Set mbp config register */
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+}
+
+/*************************************************************************
+ * EMAC Hardware manipulation
+ *************************************************************************/
+
+/**
+ * emac_int_disable - Disable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Disable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_disable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* Program C0_Int_En to zero to turn off
+ * interrupts to the CPU */
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0x0);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0x0);
+ /* NOTE: Rx Threshold and Misc interrupts are not disabled */
+ if (priv->int_disable)
+ priv->int_disable();
+
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen- only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
+ local_irq_restore(flags);
+
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x0);
+ }
+}
+
+/**
+ * emac_int_enable - Enable EMAC module interrupt (from adapter)
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enable EMAC interrupt on the adapter
+ *
+ */
+static void emac_int_enable(struct emac_priv *priv)
+{
+ if (priv->version == EMAC_VERSION_2) {
+ if (priv->int_enable)
+ priv->int_enable();
+
+ emac_ctrl_write(EMAC_DM646X_CMRXINTEN, 0xff);
+ emac_ctrl_write(EMAC_DM646X_CMTXINTEN, 0xff);
+
+ /* In addition to turning on the interrupt enable, we need to
+ * ack by writing appropriate values to the EOI
+ * register */
+
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+ } else {
+ /* Set DM644x control registers for interrupt control */
+ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
+ }
+}
+
+/**
+ * emac_irq - EMAC interrupt handler
+ * @irq: interrupt number
+ * @dev_id: EMAC network adapter data structure ptr
+ *
+ * EMAC interrupt handler - we only schedule NAPI here and do not process any
+ * packets. Even the interrupt status (TX/RX/Err) is checked in the NAPI poll
+ * function.
+ *
+ * Returns interrupt handled condition
+ */
+static irqreturn_t emac_irq(int irq, void *dev_id)
+{
+ struct net_device *ndev = (struct net_device *)dev_id;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ ++priv->isr_count;
+ if (likely(netif_running(priv->ndev))) {
+ emac_int_disable(priv);
+ napi_schedule(&priv->napi);
+ } else {
+ /* we are closing down, so don't process anything */
+ }
+ return IRQ_HANDLED;
+}
+
+static struct sk_buff *emac_rx_alloc(struct emac_priv *priv)
+{
+ struct sk_buff *skb = netdev_alloc_skb(priv->ndev, priv->rx_buf_size);
+ if (WARN_ON(!skb))
+ return NULL;
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
+}
+
+static void emac_rx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret;
+
+ /* free and bail if we are shutting down */
+ if (unlikely(!netif_running(ndev))) {
+ dev_kfree_skb_any(skb);
+ return;
+ }
+
+ /* recycle on receive error */
+ if (status < 0) {
+ ndev->stats.rx_errors++;
+ goto recycle;
+ }
+
+ /* feed received packet up the stack */
+ skb_put(skb, len);
+ skb->protocol = eth_type_trans(skb, ndev);
+ netif_receive_skb(skb);
+ ndev->stats.rx_bytes += len;
+ ndev->stats.rx_packets++;
+
+ /* alloc a new packet for receive */
+ skb = emac_rx_alloc(priv);
+ if (!skb) {
+ if (netif_msg_rx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "failed rx buffer alloc\n");
+ return;
+ }
+
+recycle:
+ ret = cpdma_chan_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), 0);
+
+ WARN_ON(ret == -ENOMEM);
+ if (unlikely(ret < 0))
+ dev_kfree_skb_any(skb);
+}
+
+static void emac_tx_handler(void *token, int len, int status)
+{
+ struct sk_buff *skb = token;
+ struct net_device *ndev = skb->dev;
+
+ /* Check whether the queue is stopped due to stalled tx dma; if so,
+ * wake the queue as we now have a free tx desc
+ */
+ if (unlikely(netif_queue_stopped(ndev)))
+ netif_wake_queue(ndev);
+ ndev->stats.tx_packets++;
+ ndev->stats.tx_bytes += len;
+ dev_kfree_skb_any(skb);
+}
+
+/**
+ * emac_dev_xmit - EMAC Transmit function
+ * @skb: SKB pointer
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called by the system to transmit a packet - we queue the packet in
+ * EMAC hardware transmit queue
+ *
+ * Returns success (NETDEV_TX_OK) or an error code (typically out of descriptors)
+ */
+static netdev_tx_t emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ int ret_code;
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ /* If no link, return */
+ if (unlikely(!priv->link)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: No link to transmit");
+ goto fail_tx;
+ }
+
+ ret_code = skb_put_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE);
+ if (unlikely(ret_code < 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: packet pad failed");
+ goto fail_tx;
+ }
+
+ skb_tx_timestamp(skb);
+
+ ret_code = cpdma_chan_submit(priv->txchan, skb, skb->data, skb->len,
+ 0);
+ if (unlikely(ret_code != 0)) {
+ if (netif_msg_tx_err(priv) && net_ratelimit())
+ dev_err(emac_dev, "DaVinci EMAC: desc submit failed");
+ goto fail_tx;
+ }
+
+ /* If there are no free tx descriptors left, we need to
+ * tell the kernel to stop sending us tx frames.
+ */
+ if (unlikely(!cpdma_check_free_tx_desc(priv->txchan)))
+ netif_stop_queue(ndev);
+
+ return NETDEV_TX_OK;
+
+fail_tx:
+ ndev->stats.tx_dropped++;
+ netif_stop_queue(ndev);
+ return NETDEV_TX_BUSY;
+}
+
+/**
+ * emac_dev_tx_timeout - EMAC Transmit timeout function
+ * @ndev: The DaVinci EMAC network adapter
+ * @txqueue: the index of the hung transmit queue
+ *
+ * Called when the system detects that an skb timeout period has expired,
+ * potentially due to an adapter fault that prevents it from sending the
+ * packet out on the wire. We tear down the TX channel assuming a hardware
+ * error and re-initialize the TX channel for hardware operation
+ *
+ */
+static void emac_dev_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+
+ if (netif_msg_tx_err(priv))
+ dev_err(emac_dev, "DaVinci EMAC: xmit timeout, restarting TX");
+
+ ndev->stats.tx_errors++;
+ emac_int_disable(priv);
+ cpdma_chan_stop(priv->txchan);
+ cpdma_chan_start(priv->txchan);
+ emac_int_enable(priv);
+}
+
+/**
+ * emac_set_type0addr - Set EMAC Type0 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type0 mac address of the adapter (Device)
+ */
+static void emac_set_type0addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ val = ((mac_addr[5] << 8) | (mac_addr[4]));
+ emac_write(EMAC_MACSRCADDRLO, val);
+
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACSRCADDRHI, val);
+ val = emac_read(EMAC_RXUNICASTSET);
+ val |= BIT(ch);
+ emac_write(EMAC_RXUNICASTSET, val);
+ val = emac_read(EMAC_RXUNICASTCLEAR);
+ val &= ~BIT(ch);
+ emac_write(EMAC_RXUNICASTCLEAR, val);
+}
+
+/**
+ * emac_set_type1addr - Set EMAC Type1 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set Type1 mac address of the adapter (Device)
+ */
+static void emac_set_type1addr(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, ch);
+ val = ((mac_addr[5] << 8) | mac_addr[4]);
+ emac_write(EMAC_MACADDRLO, val);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_set_type2addr - Set EMAC Type2 mac address
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ * @index: index into RX address entries
+ * @match: match parameter for RX address matching logic
+ *
+ * Called internally to set Type2 mac address of the adapter (Device)
+ */
+static void emac_set_type2addr(struct emac_priv *priv, u32 ch,
+ char *mac_addr, int index, int match)
+{
+ u32 val;
+ emac_write(EMAC_MACINDEX, index);
+ val = ((mac_addr[3] << 24) | (mac_addr[2] << 16) | \
+ (mac_addr[1] << 8) | (mac_addr[0]));
+ emac_write(EMAC_MACADDRHI, val);
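+ /* MACADDRLO, as composed below: address bytes 5:4 in bits 15:0, RX
+ * channel in bits 18:16, the match parameter in bit 19, with bit 20 set.
+ */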
+ val = ((mac_addr[5] << 8) | mac_addr[4] | ((ch & 0x7) << 16) | \
+ (match << 19) | BIT(20));
+ emac_write(EMAC_MACADDRLO, val);
+ emac_set_type0addr(priv, ch, mac_addr);
+}
+
+/**
+ * emac_setmac - Set mac address in the adapter (internal function)
+ * @priv: The DaVinci EMAC private adapter structure
+ * @ch: RX channel number
+ * @mac_addr: MAC address to set in device
+ *
+ * Called internally to set the mac address of the adapter (Device)
+ */
+static void emac_setmac(struct emac_priv *priv, u32 ch, char *mac_addr)
+{
+ struct device *emac_dev = &priv->ndev->dev;
+
+ if (priv->rx_addr_type == 0) {
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 1) {
+ u32 cnt;
+ for (cnt = 0; cnt < EMAC_MAX_TXRX_CHANNELS; cnt++)
+ emac_set_type1addr(priv, ch, mac_addr);
+ } else if (priv->rx_addr_type == 2) {
+ emac_set_type2addr(priv, ch, mac_addr, ch, 1);
+ emac_set_type0addr(priv, ch, mac_addr);
+ } else {
+ if (netif_msg_drv(priv))
+ dev_err(emac_dev, "DaVinci EMAC: Wrong addressing\n");
+ }
+}
+
+/**
+ * emac_dev_setmac_addr - Set mac address in the adapter
+ * @ndev: The DaVinci EMAC network adapter
+ * @addr: MAC address to set in device
+ *
+ * Called by the system to set the mac address of the adapter (Device)
+ *
+ * Returns success (0) or appropriate error code (none as of now)
+ */
+static int emac_dev_setmac_addr(struct net_device *ndev, void *addr)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &priv->ndev->dev;
+ struct sockaddr *sa = addr;
+
+ if (!is_valid_ether_addr(sa->sa_data))
+ return -EADDRNOTAVAIL;
+
+ /* Store mac addr in priv and rx channel and set it in EMAC hw */
+ memcpy(priv->mac_addr, sa->sa_data, ndev->addr_len);
+ eth_hw_addr_set(ndev, sa->sa_data);
+
+ /* MAC address is configured only after the interface is enabled. */
+ if (netif_running(ndev)) {
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: emac_dev_setmac_addr %pM\n",
+ priv->mac_addr);
+
+ return 0;
+}
+
+/**
+ * emac_hw_enable - Enable EMAC hardware for packet transmission/reception
+ * @priv: The DaVinci EMAC private adapter structure
+ *
+ * Enables EMAC hardware for packet processing - enables PHY, enables RX
+ * for packet reception and enables device interrupts and then NAPI
+ *
+ * Returns success (0) or appropriate error code (none right now)
+ */
+static int emac_hw_enable(struct emac_priv *priv)
+{
+ u32 val, mbp_enable, mac_control;
+
+ /* Soft reset */
+ emac_write(EMAC_SOFTRESET, 1);
+ while (emac_read(EMAC_SOFTRESET))
+ cpu_relax();
+
+ /* Disable interrupt & Set pacing for more interrupts initially */
+ emac_int_disable(priv);
+
+ /* Full duplex enable bit set when auto negotiation happens */
+ mac_control =
+ (((EMAC_DEF_TXPRIO_FIXED) ? (EMAC_MACCONTROL_TXPTYPE) : 0x0) |
+ ((priv->speed == 1000) ? EMAC_MACCONTROL_GIGABITEN : 0x0) |
+ ((EMAC_DEF_TXPACING_EN) ? (EMAC_MACCONTROL_TXPACEEN) : 0x0) |
+ ((priv->duplex == DUPLEX_FULL) ? 0x1 : 0));
+ emac_write(EMAC_MACCONTROL, mac_control);
+
+ mbp_enable =
+ (((EMAC_DEF_PASS_CRC) ? (EMAC_RXMBP_PASSCRC_MASK) : 0x0) |
+ ((EMAC_DEF_QOS_EN) ? (EMAC_RXMBP_QOSEN_MASK) : 0x0) |
+ ((EMAC_DEF_NO_BUFF_CHAIN) ? (EMAC_RXMBP_NOCHAIN_MASK) : 0x0) |
+ ((EMAC_DEF_MACCTRL_FRAME_EN) ? (EMAC_RXMBP_CMFEN_MASK) : 0x0) |
+ ((EMAC_DEF_SHORT_FRAME_EN) ? (EMAC_RXMBP_CSFEN_MASK) : 0x0) |
+ ((EMAC_DEF_ERROR_FRAME_EN) ? (EMAC_RXMBP_CEFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_EN) ? (EMAC_RXMBP_CAFEN_MASK) : 0x0) |
+ ((EMAC_DEF_PROM_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_PROMCH_SHIFT) |
+ ((EMAC_DEF_BCAST_EN) ? (EMAC_RXMBP_BROADEN_MASK) : 0x0) |
+ ((EMAC_DEF_BCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_BROADCH_SHIFT) |
+ ((EMAC_DEF_MCAST_EN) ? (EMAC_RXMBP_MULTIEN_MASK) : 0x0) |
+ ((EMAC_DEF_MCAST_CH & EMAC_RXMBP_CHMASK) << \
+ EMAC_RXMBP_MULTICH_SHIFT));
+ emac_write(EMAC_RXMBPENABLE, mbp_enable);
+ emac_write(EMAC_RXMAXLEN, (EMAC_DEF_MAX_FRAME_SIZE &
+ EMAC_RX_MAX_LEN_MASK));
+ emac_write(EMAC_RXBUFFEROFFSET, (EMAC_DEF_BUFFER_OFFSET &
+ EMAC_RX_BUFFER_OFFSET_MASK));
+ emac_write(EMAC_RXFILTERLOWTHRESH, 0);
+ emac_write(EMAC_RXUNICASTCLEAR, EMAC_RX_UNICAST_CLEAR_ALL);
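+ /* MACCONFIG bits 15:8 report the RX address type; emac_setmac() uses
+ * this value to pick the type 0/1/2 address programming scheme.
+ */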
+ priv->rx_addr_type = (emac_read(EMAC_MACCONFIG) >> 8) & 0xFF;
+
+ emac_write(EMAC_MACINTMASKSET, EMAC_MAC_HOST_ERR_INTMASK_VAL);
+
+ emac_setmac(priv, EMAC_DEF_RX_CH, priv->mac_addr);
+
+ /* Enable MII */
+ val = emac_read(EMAC_MACCONTROL);
+ val |= (EMAC_MACCONTROL_GMIIEN);
+ emac_write(EMAC_MACCONTROL, val);
+
+ /* Enable NAPI and interrupts */
+ napi_enable(&priv->napi);
+ emac_int_enable(priv);
+ return 0;
+
+}
+
+/**
+ * emac_poll - EMAC NAPI Poll function
+ * @napi: pointer to the napi_struct containing the DaVinci EMAC network adapter
+ * @budget: Number of receive packets to process (as told by NAPI layer)
+ *
+ * NAPI Poll function implemented to process packets as per budget. We check
+ * the type of interrupt on the device and accordingly call the TX or RX
+ * packet processing functions. We follow the budget for RX processing and
+ * also put a cap on the number of TX pkts processed through a config param.
+ * The NAPI schedule function is called if more packets are pending.
+ *
+ * Returns number of packets received (in most cases; else TX pkts - rarely)
+ */
+static int emac_poll(struct napi_struct *napi, int budget)
+{
+ unsigned int mask;
+ struct emac_priv *priv = container_of(napi, struct emac_priv, napi);
+ struct net_device *ndev = priv->ndev;
+ struct device *emac_dev = &ndev->dev;
+ u32 status = 0;
+ u32 num_rx_pkts = 0;
+
+ /* Check interrupt vectors and call packet processing */
+ status = emac_read(EMAC_MACINVECTOR);
+
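+ /* The MACINVECTOR bit layout differs between DM644x and DM646x
+ * (EMAC_VERSION_2), so select the TX/RX/host-error masks per version.
+ */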
+ mask = EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_TX_INT_VEC;
+
+ if (status & mask) {
+ cpdma_chan_process(priv->txchan, EMAC_DEF_TX_MAX_SERVICE);
+ } /* TX processing */
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC;
+
+ if (status & mask) {
+ num_rx_pkts = cpdma_chan_process(priv->rxchan, budget);
+ } /* RX processing */
+
+ mask = EMAC_DM644X_MAC_IN_VECTOR_HOST_INT;
+ if (priv->version == EMAC_VERSION_2)
+ mask = EMAC_DM646X_MAC_IN_VECTOR_HOST_INT;
+
+ if (unlikely(status & mask)) {
+ u32 ch, cause;
+ dev_err(emac_dev, "DaVinci EMAC: Fatal Hardware Error\n");
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ status = emac_read(EMAC_MACSTATUS);
+ cause = ((status & EMAC_MACSTATUS_TXERRCODE_MASK) >>
+ EMAC_MACSTATUS_TXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_TXERRCH_MASK) >>
+ EMAC_MACSTATUS_TXERRCH_SHIFT);
+ if (net_ratelimit()) {
+ dev_err(emac_dev, "TX Host error %s on ch=%d\n",
+ &emac_txhost_errcodes[cause][0], ch);
+ }
+ }
+ cause = ((status & EMAC_MACSTATUS_RXERRCODE_MASK) >>
+ EMAC_MACSTATUS_RXERRCODE_SHIFT);
+ if (cause) {
+ ch = ((status & EMAC_MACSTATUS_RXERRCH_MASK) >>
+ EMAC_MACSTATUS_RXERRCH_SHIFT);
+ if (netif_msg_hw(priv) && net_ratelimit())
+ dev_err(emac_dev, "RX Host error %s on ch=%d\n",
+ &emac_rxhost_errcodes[cause][0], ch);
+ }
+ } else if (num_rx_pkts < budget) {
+ napi_complete_done(napi, num_rx_pkts);
+ emac_int_enable(priv);
+ }
+
+ return num_rx_pkts;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * emac_poll_controller - EMAC Poll controller function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Polled functionality used by netconsole and others in non interrupt mode
+ *
+ */
+static void emac_poll_controller(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+
+ emac_int_disable(priv);
+ emac_irq(ndev->irq, ndev);
+ emac_int_enable(priv);
+}
+#endif
+
+static void emac_adjust_link(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = ndev->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ if (phydev->link) {
+ /* check the mode of operation - full/half duplex */
+ if (phydev->duplex != priv->duplex) {
+ new_state = 1;
+ priv->duplex = phydev->duplex;
+ }
+ if (phydev->speed != priv->speed) {
+ new_state = 1;
+ priv->speed = phydev->speed;
+ }
+ if (!priv->link) {
+ new_state = 1;
+ priv->link = 1;
+ }
+
+ } else if (priv->link) {
+ new_state = 1;
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+ }
+ if (new_state) {
+ emac_update_phystatus(priv);
+ phy_print_status(ndev->phydev);
+ }
+
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/*************************************************************************
+ * Linux Driver Model
+ *************************************************************************/
+
+/**
+ * emac_devioctl - EMAC adapter ioctl
+ * @ndev: The DaVinci EMAC network adapter
+ * @ifrq: request parameter
+ * @cmd: command parameter
+ *
+ * EMAC driver ioctl function
+ *
+ * Returns success(0) or appropriate error code
+ */
+static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
+{
+ if (!(netif_running(ndev)))
+ return -EINVAL;
+
+ /* TODO: Add phy read and write and private statistics get feature */
+
+ if (ndev->phydev)
+ return phy_mii_ioctl(ndev->phydev, ifrq, cmd);
+ else
+ return -EOPNOTSUPP;
+}
+
+static int match_first_device(struct device *dev, const void *data)
+{
+ if (dev->parent && dev->parent->of_node)
+ return of_device_is_compatible(dev->parent->of_node,
+ "ti,davinci_mdio");
+
+ return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
+/**
+ * emac_dev_open - EMAC device open
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to start the interface. We init TX/RX channels
+ * and enable the hardware for packet reception/transmission and start the
+ * network queue.
+ *
+ * Returns 0 for a successful open, or appropriate error code
+ */
+static int emac_dev_open(struct net_device *ndev)
+{
+ struct device *emac_dev = &ndev->dev;
+ struct resource *res;
+ int q, m, ret;
+ int res_num = 0, irq_num = 0;
+ int i = 0;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct phy_device *phydev = NULL;
+ struct device *phy = NULL;
+
+ ret = pm_runtime_resume_and_get(&priv->pdev->dev);
+ if (ret < 0) {
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ netif_carrier_off(ndev);
+ eth_hw_addr_set(ndev, priv->mac_addr);
+
+ /* Configuration items */
+ priv->rx_buf_size = EMAC_DEF_MAX_FRAME_SIZE + NET_IP_ALIGN;
+
+ priv->mac_hash1 = 0;
+ priv->mac_hash2 = 0;
+ emac_write(EMAC_MACHASH1, 0);
+ emac_write(EMAC_MACHASH2, 0);
+
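+ /* Pre-queue RX buffers on the idle RX channel before the CPDMA
+ * controller is started below.
+ */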
+ for (i = 0; i < EMAC_DEF_RX_NUM_DESC; i++) {
+ struct sk_buff *skb = emac_rx_alloc(priv);
+
+ if (!skb)
+ break;
+
+ ret = cpdma_chan_idle_submit(priv->rxchan, skb, skb->data,
+ skb_tailroom(skb), 0);
+ if (WARN_ON(ret < 0))
+ break;
+ }
+
+ /* Request IRQ */
+ if (dev_of_node(&priv->pdev->dev)) {
+ while ((ret = platform_get_irq_optional(priv->pdev, res_num)) != -ENXIO) {
+ if (ret < 0)
+ goto rollback;
+
+ ret = request_irq(ret, emac_irq, 0, ndev->name, ndev);
+ if (ret) {
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
+ goto rollback;
+ }
+ res_num++;
+ }
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, res_num))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+ ret = request_irq(irq_num, emac_irq, 0, ndev->name, ndev);
+ if (ret) {
+ dev_err(emac_dev, "DaVinci EMAC: request_irq() failed\n");
+ goto rollback;
+ }
+ }
+ res_num++;
+ }
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
+ }
+
+ /* Start/Enable EMAC hardware */
+ emac_hw_enable(priv);
+
+ /* Enable Interrupt pacing if configured */
+ if (priv->coal_intvl != 0) {
+ struct ethtool_coalesce coal;
+
+ coal.rx_coalesce_usecs = (priv->coal_intvl << 4);
+ emac_set_coalesce(ndev, &coal, NULL, NULL);
+ }
+
+ cpdma_ctlr_start(priv->dma);
+
+ if (priv->phy_node) {
+ phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!phydev) {
+ dev_err(emac_dev, "could not connect to phy %pOF\n",
+ priv->phy_node);
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
+ /* use the first phy on the bus if pdata did not give us a phy id */
+ if (!phydev && !priv->phy_id) {
+ /* NOTE: we can't use bus_find_device_by_name() here because
+ * the device name is not guaranteed to be 'davinci_mdio'. On
+ * some systems it can be 'davinci_mdio.0' so we need to use
+ * strncmp() against the first part of the string to correctly
+ * match it.
+ */
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
+ if (phy) {
+ priv->phy_id = dev_name(phy);
+ if (!priv->phy_id || !*priv->phy_id)
+ put_device(phy);
+ }
+ }
+
+ if (!phydev && priv->phy_id && *priv->phy_id) {
+ phydev = phy_connect(ndev, priv->phy_id,
+ &emac_adjust_link,
+ PHY_INTERFACE_MODE_MII);
+ put_device(phy); /* reference taken by bus_find_device */
+ if (IS_ERR(phydev)) {
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_id);
+ ret = PTR_ERR(phydev);
+ goto err;
+ }
+
+ priv->link = 0;
+ priv->speed = 0;
+ priv->duplex = ~0;
+
+ phy_attached_info(phydev);
+ }
+
+ if (!phydev) {
+ /* No PHY: fix the link, speed and duplex settings */
+ dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
+ priv->link = 1;
+ priv->speed = SPEED_100;
+ priv->duplex = DUPLEX_FULL;
+ emac_update_phystatus(priv);
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: Opened %s\n", ndev->name);
+
+ if (phydev)
+ phy_start(phydev);
+
+ return 0;
+
+err:
+ emac_int_disable(priv);
+ napi_disable(&priv->napi);
+
+rollback:
+ if (dev_of_node(&priv->pdev->dev)) {
+ for (q = res_num - 1; q >= 0; q--) {
+ irq_num = platform_get_irq(priv->pdev, q);
+ if (irq_num > 0)
+ free_irq(irq_num, ndev);
+ }
+ } else {
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
+
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
+ }
+ cpdma_ctlr_stop(priv->dma);
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
+}
+
+/**
+ * emac_dev_stop - EMAC device stop
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when the system wants to stop or bring down the interface. We stop
+ * the network queue, disable interrupts and clean up the TX/RX channels.
+ *
+ * Returns 0 on success or an appropriate error code
+ */
+static int emac_dev_stop(struct net_device *ndev)
+{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device *emac_dev = &ndev->dev;
+ int ret = 0;
+
+ /* inform the upper layers. */
+ netif_stop_queue(ndev);
+ napi_disable(&priv->napi);
+
+ netif_carrier_off(ndev);
+ emac_int_disable(priv);
+ cpdma_ctlr_stop(priv->dma);
+ emac_write(EMAC_SOFTRESET, 1);
+
+ if (ndev->phydev)
+ phy_disconnect(ndev->phydev);
+
+ /* Free IRQ */
+ if (dev_of_node(&priv->pdev->dev)) {
+ do {
+ ret = platform_get_irq_optional(priv->pdev, i);
+ if (ret < 0 && ret != -ENXIO)
+ break;
+ if (ret > 0) {
+ free_irq(ret, priv->ndev);
+ } else {
+ ret = 0;
+ break;
+ }
+ } while (++i);
+ } else {
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+ }
+
+ if (netif_msg_drv(priv))
+ dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
+
+ pm_runtime_put(&priv->pdev->dev);
+ return ret;
+}
+
+/**
+ * emac_dev_getnetstats - EMAC get statistics function
+ * @ndev: The DaVinci EMAC network adapter
+ *
+ * Called when system wants to get statistics from the device.
+ *
+ * We return the statistics in net_device_stats structure pulled from emac
+ */
+static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
+{
+ struct emac_priv *priv = netdev_priv(ndev);
+ u32 mac_control;
+ u32 stats_clear_mask;
+ int err;
+
+ err = pm_runtime_resume_and_get(&priv->pdev->dev);
+ if (err < 0) {
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return &ndev->stats;
+ }
+
+ /* update emac hardware stats and reset the registers*/
+
+ mac_control = emac_read(EMAC_MACCONTROL);
+
+ if (mac_control & EMAC_MACCONTROL_GMIIEN)
+ stats_clear_mask = EMAC_STATS_CLR_MASK;
+ else
+ stats_clear_mask = 0;
+
+ ndev->stats.multicast += emac_read(EMAC_RXMCASTFRAMES);
+ emac_write(EMAC_RXMCASTFRAMES, stats_clear_mask);
+
+ ndev->stats.collisions += (emac_read(EMAC_TXCOLLISION) +
+ emac_read(EMAC_TXSINGLECOLL) +
+ emac_read(EMAC_TXMULTICOLL));
+ emac_write(EMAC_TXCOLLISION, stats_clear_mask);
+ emac_write(EMAC_TXSINGLECOLL, stats_clear_mask);
+ emac_write(EMAC_TXMULTICOLL, stats_clear_mask);
+
+ ndev->stats.rx_length_errors += (emac_read(EMAC_RXOVERSIZED) +
+ emac_read(EMAC_RXJABBER) +
+ emac_read(EMAC_RXUNDERSIZED));
+ emac_write(EMAC_RXOVERSIZED, stats_clear_mask);
+ emac_write(EMAC_RXJABBER, stats_clear_mask);
+ emac_write(EMAC_RXUNDERSIZED, stats_clear_mask);
+
+ ndev->stats.rx_over_errors += (emac_read(EMAC_RXSOFOVERRUNS) +
+ emac_read(EMAC_RXMOFOVERRUNS));
+ emac_write(EMAC_RXSOFOVERRUNS, stats_clear_mask);
+ emac_write(EMAC_RXMOFOVERRUNS, stats_clear_mask);
+
+ ndev->stats.rx_fifo_errors += emac_read(EMAC_RXDMAOVERRUNS);
+ emac_write(EMAC_RXDMAOVERRUNS, stats_clear_mask);
+
+ ndev->stats.tx_carrier_errors +=
+ emac_read(EMAC_TXCARRIERSENSE);
+ emac_write(EMAC_TXCARRIERSENSE, stats_clear_mask);
+
+ ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
+ emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+
+ pm_runtime_put(&priv->pdev->dev);
+
+ return &ndev->stats;
+}
+
+static const struct net_device_ops emac_netdev_ops = {
+ .ndo_open = emac_dev_open,
+ .ndo_stop = emac_dev_stop,
+ .ndo_start_xmit = emac_dev_xmit,
+ .ndo_set_rx_mode = emac_dev_mcast_set,
+ .ndo_set_mac_address = emac_dev_setmac_addr,
+ .ndo_eth_ioctl = emac_devioctl,
+ .ndo_tx_timeout = emac_dev_tx_timeout,
+ .ndo_get_stats = emac_dev_getnetstats,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = emac_poll_controller,
+#endif
+};
+
+static const struct of_device_id davinci_emac_of_match[];
+
+static struct emac_platform_data *
+davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ const struct emac_platform_data *auxdata;
+ struct emac_platform_data *pdata = NULL;
+
+ if (!IS_ENABLED(CONFIG_OF) || !pdev->dev.of_node)
+ return dev_get_platdata(&pdev->dev);
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ np = pdev->dev.of_node;
+ pdata->version = EMAC_VERSION_2;
+
+ if (!is_valid_ether_addr(pdata->mac_addr))
+ of_get_mac_address(np, pdata->mac_addr);
+
+ of_property_read_u32(np, "ti,davinci-ctrl-reg-offset",
+ &pdata->ctrl_reg_offset);
+
+ of_property_read_u32(np, "ti,davinci-ctrl-mod-reg-offset",
+ &pdata->ctrl_mod_reg_offset);
+
+ of_property_read_u32(np, "ti,davinci-ctrl-ram-offset",
+ &pdata->ctrl_ram_offset);
+
+ of_property_read_u32(np, "ti,davinci-ctrl-ram-size",
+ &pdata->ctrl_ram_size);
+
+ of_property_read_u8(np, "ti,davinci-rmii-en", &pdata->rmii_en);
+
+ pdata->no_bd_ram = of_property_read_bool(np, "ti,davinci-no-bd-ram");
+
+ priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!priv->phy_node) {
+ if (!of_phy_is_fixed_link(np))
+ pdata->phy_id = NULL;
+ else if (of_phy_register_fixed_link(np) >= 0)
+ priv->phy_node = of_node_get(np);
+ }
+
+ auxdata = pdev->dev.platform_data;
+ if (auxdata) {
+ pdata->interrupt_enable = auxdata->interrupt_enable;
+ pdata->interrupt_disable = auxdata->interrupt_disable;
+ }
+
+ match = of_match_device(davinci_emac_of_match, &pdev->dev);
+ if (match && match->data) {
+ auxdata = match->data;
+ pdata->version = auxdata->version;
+ pdata->hw_ram_addr = auxdata->hw_ram_addr;
+ }
+
+ return pdata;
+}
+
+static int davinci_emac_try_get_mac(struct platform_device *pdev,
+ int instance, u8 *mac_addr)
+{
+ if (!pdev->dev.of_node)
+ return -EINVAL;
+
+ return ti_cm_get_macid(&pdev->dev, instance, mac_addr);
+}
+
+/**
+ * davinci_emac_probe - EMAC device probe
+ * @pdev: The DaVinci EMAC device that we are probing
+ *
+ * Called when probing for emac devices. We get details of instances and
+ * resource information from platform init, register a network device
+ * and allocate resources necessary for the driver to operate
+ */
+static int davinci_emac_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ int rc = 0;
+ struct resource *res, *res_ctrl;
+ struct net_device *ndev;
+ struct emac_priv *priv;
+ unsigned long hw_ram_addr;
+ struct emac_platform_data *pdata;
+ struct cpdma_params dma_params;
+ struct clk *emac_clk;
+ unsigned long emac_bus_frequency;
+
+ /* obtain emac clock from kernel */
+ emac_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(emac_clk)) {
+ dev_err(&pdev->dev, "failed to get EMAC clock\n");
+ return -EBUSY;
+ }
+ emac_bus_frequency = clk_get_rate(emac_clk);
+ devm_clk_put(&pdev->dev, emac_clk);
+
+ /* TODO: Probe PHY here if possible */
+
+ ndev = alloc_etherdev(sizeof(struct emac_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+ priv = netdev_priv(ndev);
+ priv->pdev = pdev;
+ priv->ndev = ndev;
+ priv->msg_enable = netif_msg_init(debug_level, DAVINCI_EMAC_DEBUG);
+
+ spin_lock_init(&priv->lock);
+
+ pdata = davinci_emac_of_get_pdata(pdev, priv);
+ if (!pdata) {
+ dev_err(&pdev->dev, "no platform data\n");
+ rc = -ENODEV;
+ goto err_free_netdev;
+ }
+
+ /* MAC addr and PHY mask , RMII enable info from platform_data */
+ memcpy(priv->mac_addr, pdata->mac_addr, ETH_ALEN);
+ priv->phy_id = pdata->phy_id;
+ priv->rmii_en = pdata->rmii_en;
+ priv->version = pdata->version;
+ priv->int_enable = pdata->interrupt_enable;
+ priv->int_disable = pdata->interrupt_disable;
+
+ priv->coal_intvl = 0;
+ priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
+
+ /* Get EMAC platform data */
+ priv->remap_addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(priv->remap_addr)) {
+ rc = PTR_ERR(priv->remap_addr);
+ goto no_pdata;
+ }
+ priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
+
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_ctrl) {
+ priv->ctrl_base =
+ devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(priv->ctrl_base)) {
+ rc = PTR_ERR(priv->ctrl_base);
+ goto no_pdata;
+ }
+ } else {
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+ }
+
+ priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
+ ndev->base_addr = (unsigned long)priv->remap_addr;
+
+ hw_ram_addr = pdata->hw_ram_addr;
+ if (!hw_ram_addr)
+ hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ memset(&dma_params, 0, sizeof(dma_params));
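+ /* The CPDMA registers and per-channel head/completion pointers live
+ * at fixed offsets within the EMAC register window.
+ */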
+ dma_params.dev = &pdev->dev;
+ dma_params.dmaregs = priv->emac_base;
+ dma_params.rxthresh = priv->emac_base + 0x120;
+ dma_params.rxfree = priv->emac_base + 0x140;
+ dma_params.txhdp = priv->emac_base + 0x600;
+ dma_params.rxhdp = priv->emac_base + 0x620;
+ dma_params.txcp = priv->emac_base + 0x640;
+ dma_params.rxcp = priv->emac_base + 0x660;
+ dma_params.num_chan = EMAC_MAX_TXRX_CHANNELS;
+ dma_params.min_packet_size = EMAC_DEF_MIN_ETHPKTSIZE;
+ dma_params.desc_hw_addr = hw_ram_addr;
+ dma_params.desc_mem_size = pdata->ctrl_ram_size;
+ dma_params.desc_align = 16;
+
+ dma_params.desc_mem_phys = pdata->no_bd_ram ? 0 :
+ (u32 __force)res->start + pdata->ctrl_ram_offset;
+
+ priv->dma = cpdma_ctlr_create(&dma_params);
+ if (!priv->dma) {
+ dev_err(&pdev->dev, "error initializing DMA\n");
+ rc = -ENOMEM;
+ goto no_pdata;
+ }
+
+ priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
+ emac_tx_handler, 0);
+ if (IS_ERR(priv->txchan)) {
+ dev_err(&pdev->dev, "error initializing tx dma channel\n");
+ rc = PTR_ERR(priv->txchan);
+ goto err_free_dma;
+ }
+
+ priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+ emac_rx_handler, 1);
+ if (IS_ERR(priv->rxchan)) {
+ dev_err(&pdev->dev, "error initializing rx dma channel\n");
+ rc = PTR_ERR(priv->rxchan);
+ goto err_free_txchan;
+ }
+
+ rc = platform_get_irq(pdev, 0);
+ if (rc < 0)
+ goto err_free_rxchan;
+ ndev->irq = rc;
+
+ rc = davinci_emac_try_get_mac(pdev, res_ctrl ? 0 : 1, priv->mac_addr);
+ if (!rc)
+ eth_hw_addr_set(ndev, priv->mac_addr);
+
+ if (!is_valid_ether_addr(priv->mac_addr)) {
+ /* Use random MAC if still none obtained. */
+ eth_hw_addr_random(ndev);
+ memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len);
+ dev_warn(&pdev->dev, "using random MAC addr: %pM\n",
+ priv->mac_addr);
+ }
+
+ ndev->netdev_ops = &emac_netdev_ops;
+ ndev->ethtool_ops = &ethtool_ops;
+ netif_napi_add(ndev, &priv->napi, emac_poll);
+
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_resume_and_get(&pdev->dev);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, rc);
+ goto err_napi_del;
+ }
+
+ /* register the network device */
+ SET_NETDEV_DEV(ndev, &pdev->dev);
+ rc = register_netdev(ndev);
+ if (rc) {
+ dev_err(&pdev->dev, "error in register_netdev\n");
+ rc = -ENODEV;
+ pm_runtime_put(&pdev->dev);
+ goto err_napi_del;
+ }
+
+ if (netif_msg_probe(priv)) {
+ dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
+ "(regs: %pa, irq: %d)\n",
+ &priv->emac_base_phys, ndev->irq);
+ }
+ pm_runtime_put(&pdev->dev);
+
+ return 0;
+
+err_napi_del:
+ netif_napi_del(&priv->napi);
+err_free_rxchan:
+ cpdma_chan_destroy(priv->rxchan);
+err_free_txchan:
+ cpdma_chan_destroy(priv->txchan);
+err_free_dma:
+ cpdma_ctlr_destroy(priv->dma);
+no_pdata:
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ of_node_put(priv->phy_node);
+err_free_netdev:
+ free_netdev(ndev);
+ return rc;
+}
+
+/**
+ * davinci_emac_remove - EMAC device remove
+ * @pdev: The DaVinci EMAC device that we are removing
+ *
+ * Called when removing the device driver. We disable clock usage and release
+ * the resources taken up by the driver and unregister network device
+ */
+static int davinci_emac_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct emac_priv *priv = netdev_priv(ndev);
+ struct device_node *np = pdev->dev.of_node;
+
+ dev_notice(&ndev->dev, "DaVinci EMAC: davinci_emac_remove()\n");
+
+ if (priv->txchan)
+ cpdma_chan_destroy(priv->txchan);
+ if (priv->rxchan)
+ cpdma_chan_destroy(priv->rxchan);
+ cpdma_ctlr_destroy(priv->dma);
+
+ unregister_netdev(ndev);
+ of_node_put(priv->phy_node);
+ pm_runtime_disable(&pdev->dev);
+ if (of_phy_is_fixed_link(np))
+ of_phy_deregister_fixed_link(np);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static int davinci_emac_suspend(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (netif_running(ndev))
+ emac_dev_stop(ndev);
+
+ return 0;
+}
+
+static int davinci_emac_resume(struct device *dev)
+{
+ struct net_device *ndev = dev_get_drvdata(dev);
+
+ if (netif_running(ndev))
+ emac_dev_open(ndev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops davinci_emac_pm_ops = {
+ .suspend = davinci_emac_suspend,
+ .resume = davinci_emac_resume,
+};
+
+static const struct emac_platform_data am3517_emac_data = {
+ .version = EMAC_VERSION_2,
+ .hw_ram_addr = 0x01e20000,
+};
+
+static const struct emac_platform_data dm816_emac_data = {
+ .version = EMAC_VERSION_2,
+};
+
+static const struct of_device_id davinci_emac_of_match[] = {
+ {.compatible = "ti,davinci-dm6467-emac", },
+ {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+ {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
+ {},
+};
+MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
+
+/* davinci_emac_driver: EMAC platform driver structure */
+static struct platform_driver davinci_emac_driver = {
+ .driver = {
+ .name = "davinci_emac",
+ .pm = &davinci_emac_pm_ops,
+ .of_match_table = davinci_emac_of_match,
+ },
+ .probe = davinci_emac_probe,
+ .remove = davinci_emac_remove,
+};
+
+/**
+ * davinci_emac_init - EMAC driver module init
+ *
+ * Called when initializing the driver. We register the driver with
+ * the platform.
+ */
+static int __init davinci_emac_init(void)
+{
+ return platform_driver_register(&davinci_emac_driver);
+}
+late_initcall(davinci_emac_init);
+
+/**
+ * davinci_emac_exit - EMAC driver module exit
+ *
+ * Called when exiting the driver completely. We unregister the driver with
+ * the platform and exit
+ */
+static void __exit davinci_emac_exit(void)
+{
+ platform_driver_unregister(&davinci_emac_driver);
+}
+module_exit(davinci_emac_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Anant Gole <anantgole@ti.com>");
+MODULE_AUTHOR("DaVinci EMAC Maintainer: Chaithrika U S <chaithrika@ti.com>");
+MODULE_DESCRIPTION("DaVinci EMAC Ethernet driver");
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
new file mode 100644
index 000000000..946b9753c
--- /dev/null
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -0,0 +1,750 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DaVinci MDIO Module driver
+ *
+ * Copyright (C) 2010 Texas Instruments.
+ *
+ * Shamelessly ripped out of davinci_emac.c, original copyrights follow:
+ *
+ * Copyright (C) 2009 Texas Instruments.
+ *
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/pm_runtime.h>
+#include <linux/davinci_emac.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/mdio-bitbang.h>
+#include <linux/sys_soc.h>
+
+/*
+ * This timeout definition is a worst-case ultra defensive measure against
+ * unexpected controller lock ups. Ideally, we should never ever hit this
+ * scenario in practice.
+ */
+#define MDIO_TIMEOUT 100 /* msecs */
+
+#define PHY_REG_MASK 0x1f
+#define PHY_ID_MASK 0x1f
+
+#define DEF_OUT_FREQ 2200000 /* 2.2 MHz */
+
+struct davinci_mdio_of_param {
+ int autosuspend_delay_ms;
+ bool manual_mode;
+};
+
+struct davinci_mdio_regs {
+ u32 version;
+ u32 control;
+#define CONTROL_IDLE BIT(31)
+#define CONTROL_ENABLE BIT(30)
+#define CONTROL_MAX_DIV (0xffff)
+#define CONTROL_CLKDIV GENMASK(15, 0)
+
+#define MDIO_MAN_MDCLK_O BIT(2)
+#define MDIO_MAN_OE BIT(1)
+#define MDIO_MAN_PIN BIT(0)
+#define MDIO_MANUALMODE BIT(31)
+
+#define MDIO_PIN 0
+
+
+ u32 alive;
+ u32 link;
+ u32 linkintraw;
+ u32 linkintmasked;
+ u32 __reserved_0[2];
+ u32 userintraw;
+ u32 userintmasked;
+ u32 userintmaskset;
+ u32 userintmaskclr;
+ u32 manualif;
+ u32 poll;
+ u32 __reserved_1[18];
+
+ struct {
+ u32 access;
+#define USERACCESS_GO BIT(31)
+#define USERACCESS_WRITE BIT(30)
+#define USERACCESS_ACK BIT(29)
+#define USERACCESS_READ (0)
+#define USERACCESS_DATA (0xffff)
+
+ u32 physel;
+ } user[];
+};
+
+static const struct mdio_platform_data default_pdata = {
+ .bus_freq = DEF_OUT_FREQ,
+};
+
+struct davinci_mdio_data {
+ struct mdio_platform_data pdata;
+ struct mdiobb_ctrl bb_ctrl;
+ struct davinci_mdio_regs __iomem *regs;
+ struct clk *clk;
+ struct device *dev;
+ struct mii_bus *bus;
+ bool active_in_suspend;
+ unsigned long access_time; /* jiffies */
+ /* Indicates that the driver shouldn't modify phy_mask when the
+ * MDIO bus is registered from DT.
+ */
+ bool skip_scan;
+ u32 clk_div;
+ bool manual_mode;
+};
+
+static void davinci_mdio_init_clk(struct davinci_mdio_data *data)
+{
+ u32 mdio_in, div, mdio_out_khz, access_time;
+
+ mdio_in = clk_get_rate(data->clk);
+ div = (mdio_in / data->pdata.bus_freq) - 1;
+ if (div > CONTROL_MAX_DIV)
+ div = CONTROL_MAX_DIV;
+
+ data->clk_div = div;
+ /*
+ * One mdio transaction consists of:
+ * 32 bits of preamble
+ * 32 bits of transferred data
+ * 24 bits of bus yield (not needed unless shared?)
+ */
+ mdio_out_khz = mdio_in / (1000 * (div + 1));
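+ /* access_time: microseconds for one 88-bit transaction at the
+ * resulting MDIO output clock (88 * 1000 / kHz).
+ */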
+ access_time = (88 * 1000) / mdio_out_khz;
+
+ /*
+ * In the worst case, we could be kicking off a user-access immediately
+ * after the mdio bus scan state-machine triggered its own read. If
+ * so, our request could get deferred by one access cycle. We
+ * defensively allow for 4 access cycles.
+ */
+ data->access_time = usecs_to_jiffies(access_time * 4);
+ if (!data->access_time)
+ data->access_time = 1;
+}
+
+static void davinci_mdio_enable(struct davinci_mdio_data *data)
+{
+ /* set enable and clock divider */
+ writel(data->clk_div | CONTROL_ENABLE, &data->regs->control);
+}
+
+static void davinci_mdio_disable(struct davinci_mdio_data *data)
+{
+ u32 reg;
+
+ /* Disable MDIO state machine */
+ reg = readl(&data->regs->control);
+
+ reg &= ~CONTROL_CLKDIV;
+ reg |= data->clk_div;
+
+ reg &= ~CONTROL_ENABLE;
+ writel(reg, &data->regs->control);
+}
+
+static void davinci_mdio_enable_manual_mode(struct davinci_mdio_data *data)
+{
+ u32 reg;
+ /* set manual mode */
+ reg = readl(&data->regs->poll);
+ reg |= MDIO_MANUALMODE;
+ writel(reg, &data->regs->poll);
+}
+
+static void davinci_set_mdc(struct mdiobb_ctrl *ctrl, int level)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (level)
+ reg |= MDIO_MAN_MDCLK_O;
+ else
+ reg &= ~MDIO_MAN_MDCLK_O;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (output)
+ reg |= MDIO_MAN_OE;
+ else
+ reg &= ~MDIO_MAN_OE;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static void davinci_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
+{
+ struct davinci_mdio_data *data;
+ u32 reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+
+ if (value)
+ reg |= MDIO_MAN_PIN;
+ else
+ reg &= ~MDIO_MAN_PIN;
+
+ writel(reg, &data->regs->manualif);
+}
+
+static int davinci_get_mdio_data(struct mdiobb_ctrl *ctrl)
+{
+ struct davinci_mdio_data *data;
+ unsigned long reg;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+ reg = readl(&data->regs->manualif);
+ return test_bit(MDIO_PIN, &reg);
+}
+
+static int davinci_mdiobb_read(struct mii_bus *bus, int phy, int reg)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_read(bus, phy, reg);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdiobb_write(struct mii_bus *bus, int phy, int reg,
+ u16 val)
+{
+ int ret;
+
+ ret = pm_runtime_resume_and_get(bus->parent);
+ if (ret < 0)
+ return ret;
+
+ ret = mdiobb_write(bus, phy, reg, val);
+
+ pm_runtime_mark_last_busy(bus->parent);
+ pm_runtime_put_autosuspend(bus->parent);
+
+ return ret;
+}
+
+static int davinci_mdio_common_reset(struct davinci_mdio_data *data)
+{
+ u32 phy_mask, ver;
+ int ret;
+
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
+
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ }
+
+ /* wait for scan logic to settle */
+ msleep(PHY_MAX_ADDR * data->access_time);
+
+ /* dump hardware version info */
+ ver = readl(&data->regs->version);
+ dev_info(data->dev,
+ "davinci mdio revision %d.%d, bus freq %ld\n",
+ (ver >> 8) & 0xff, ver & 0xff,
+ data->pdata.bus_freq);
+
+ if (data->skip_scan)
+ goto done;
+
+ /* get phy mask from the alive register */
+ phy_mask = readl(&data->regs->alive);
+ if (phy_mask) {
+ /* restrict mdio bus to live phys only */
+ dev_info(data->dev, "detected phy mask %x\n", ~phy_mask);
+ phy_mask = ~phy_mask;
+ } else {
+ /* desperately scan all phys */
+ dev_warn(data->dev, "no live phy, scanning all\n");
+ phy_mask = 0;
+ }
+ data->bus->phy_mask = phy_mask;
+
+done:
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
+ return 0;
+}
+
+static int davinci_mdio_reset(struct mii_bus *bus)
+{
+ struct davinci_mdio_data *data = bus->priv;
+
+ return davinci_mdio_common_reset(data);
+}
+
+static int davinci_mdiobb_reset(struct mii_bus *bus)
+{
+ struct mdiobb_ctrl *ctrl = bus->priv;
+ struct davinci_mdio_data *data;
+
+ data = container_of(ctrl, struct davinci_mdio_data, bb_ctrl);
+
+ return davinci_mdio_common_reset(data);
+}
+
+/* wait until hardware is ready for another user access */
+static inline int wait_for_user_access(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ unsigned long timeout = jiffies + msecs_to_jiffies(MDIO_TIMEOUT);
+ u32 reg;
+
+ while (time_after(timeout, jiffies)) {
+ reg = readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ reg = readl(&regs->control);
+ if ((reg & CONTROL_IDLE) == 0) {
+ usleep_range(100, 200);
+ continue;
+ }
+
+ /*
+ * An emac soft_reset may have clobbered the mdio controller's
+ * state machine. We need to reset and retry the current
+ * operation
+ */
+ dev_warn(data->dev, "resetting idled controller\n");
+ davinci_mdio_enable(data);
+ return -EAGAIN;
+ }
+
+ reg = readl(&regs->user[0].access);
+ if ((reg & USERACCESS_GO) == 0)
+ return 0;
+
+ dev_err(data->dev, "timed out waiting for user access\n");
+ return -ETIMEDOUT;
+}
+
+/* wait until hardware state machine is idle */
+static inline int wait_for_idle(struct davinci_mdio_data *data)
+{
+ struct davinci_mdio_regs __iomem *regs = data->regs;
+ u32 val, ret;
+
+ ret = readl_poll_timeout(&regs->control, val, val & CONTROL_IDLE,
+ 0, MDIO_TIMEOUT * 1000);
+ if (ret)
+ dev_err(data->dev, "timed out waiting for idle\n");
+
+ return ret;
+}
+
+static int davinci_mdio_read(struct mii_bus *bus, int phy_id, int phy_reg)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
+
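+ /* USERACCESS word: GO + READ with the register address in bits 25:21
+ * and the PHY address in bits 20:16; data returns in bits 15:0.
+ */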
+ reg = (USERACCESS_GO | USERACCESS_READ | (phy_reg << 21) |
+ (phy_id << 16));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ reg = readl(&data->regs->user[0].access);
+ ret = (reg & USERACCESS_ACK) ? (reg & USERACCESS_DATA) : -EIO;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+ return ret;
+}
+
+static int davinci_mdio_write(struct mii_bus *bus, int phy_id,
+ int phy_reg, u16 phy_data)
+{
+ struct davinci_mdio_data *data = bus->priv;
+ u32 reg;
+ int ret;
+
+ if (phy_reg & ~PHY_REG_MASK || phy_id & ~PHY_ID_MASK)
+ return -EINVAL;
+
+ ret = pm_runtime_resume_and_get(data->dev);
+ if (ret < 0)
+ return ret;
+
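+ /* Same USERACCESS layout as the read path, plus the WRITE flag and
+ * the 16-bit write data in bits 15:0.
+ */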
+ reg = (USERACCESS_GO | USERACCESS_WRITE | (phy_reg << 21) |
+ (phy_id << 16) | (phy_data & USERACCESS_DATA));
+
+ while (1) {
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ if (ret < 0)
+ break;
+
+ writel(reg, &data->regs->user[0].access);
+
+ ret = wait_for_user_access(data);
+ if (ret == -EAGAIN)
+ continue;
+ break;
+ }
+
+ pm_runtime_mark_last_busy(data->dev);
+ pm_runtime_put_autosuspend(data->dev);
+
+ return ret;
+}
+
+static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
+ struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ u32 prop;
+
+ if (!node)
+ return -EINVAL;
+
+ if (of_property_read_u32(node, "bus_freq", &prop)) {
+ dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
+ return -EINVAL;
+ }
+ data->bus_freq = prop;
+
+ return 0;
+}
+
+struct k3_mdio_soc_data {
+ bool manual_mode;
+};
+
+static const struct k3_mdio_soc_data am65_mdio_soc_data = {
+ .manual_mode = true,
+};
+
+static const struct soc_device_attribute k3_mdio_socinfo[] = {
+ { .family = "AM62X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM64X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "AM65X", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J7200", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR1.0", .data = &am65_mdio_soc_data },
+ { .family = "J721E", .revision = "SR2.0", .data = &am65_mdio_soc_data },
+ { .family = "J721S2", .revision = "SR1.0", .data = &am65_mdio_soc_data},
+ { /* sentinel */ },
+};
+
+#if IS_ENABLED(CONFIG_OF)
+static const struct davinci_mdio_of_param of_cpsw_mdio_data = {
+ .autosuspend_delay_ms = 100,
+};
+
+static const struct of_device_id davinci_mdio_of_mtable[] = {
+ { .compatible = "ti,davinci_mdio", },
+ { .compatible = "ti,cpsw-mdio", .data = &of_cpsw_mdio_data},
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, davinci_mdio_of_mtable);
+#endif
+
+static const struct mdiobb_ops davinci_mdiobb_ops = {
+ .owner = THIS_MODULE,
+ .set_mdc = davinci_set_mdc,
+ .set_mdio_dir = davinci_set_mdio_dir,
+ .set_mdio_data = davinci_set_mdio_data,
+ .get_mdio_data = davinci_get_mdio_data,
+};
+
+static int davinci_mdio_probe(struct platform_device *pdev)
+{
+ struct mdio_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ struct device *dev = &pdev->dev;
+ struct davinci_mdio_data *data;
+ struct resource *res;
+ struct phy_device *phy;
+ int ret, addr;
+ int autosuspend_delay_ms = -1;
+
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->manual_mode = false;
+ data->bb_ctrl.ops = &davinci_mdiobb_ops;
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ const struct soc_device_attribute *soc_match_data;
+
+ soc_match_data = soc_device_match(k3_mdio_socinfo);
+ if (soc_match_data && soc_match_data->data) {
+ const struct k3_mdio_soc_data *socdata =
+ soc_match_data->data;
+
+ data->manual_mode = socdata->manual_mode;
+ }
+ }
+
+ if (data->manual_mode)
+ data->bus = alloc_mdio_bitbang(&data->bb_ctrl);
+ else
+ data->bus = devm_mdiobus_alloc(dev);
+
+ if (!data->bus) {
+ dev_err(dev, "failed to alloc mii bus\n");
+ return -ENOMEM;
+ }
+
+ if (IS_ENABLED(CONFIG_OF) && dev->of_node) {
+ const struct davinci_mdio_of_param *of_mdio_data;
+
+ ret = davinci_mdio_probe_dt(&data->pdata, pdev);
+ if (ret)
+ return ret;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s", pdev->name);
+
+ of_mdio_data = of_device_get_match_data(&pdev->dev);
+ if (of_mdio_data) {
+ autosuspend_delay_ms =
+ of_mdio_data->autosuspend_delay_ms;
+ }
+ } else {
+ data->pdata = pdata ? (*pdata) : default_pdata;
+ snprintf(data->bus->id, MII_BUS_ID_SIZE, "%s-%x",
+ pdev->name, pdev->id);
+ }
+
+ data->bus->name = dev_name(dev);
+
+ if (data->manual_mode) {
+ data->bus->read = davinci_mdiobb_read;
+ data->bus->write = davinci_mdiobb_write;
+ data->bus->reset = davinci_mdiobb_reset;
+
+ dev_info(dev, "Configuring MDIO in manual mode\n");
+ } else {
+ data->bus->read = davinci_mdio_read;
+ data->bus->write = davinci_mdio_write;
+ data->bus->reset = davinci_mdio_reset;
+ data->bus->priv = data;
+ }
+ data->bus->parent = dev;
+
+ data->clk = devm_clk_get(dev, "fck");
+ if (IS_ERR(data->clk)) {
+ dev_err(dev, "failed to get device clock\n");
+ return PTR_ERR(data->clk);
+ }
+
+ dev_set_drvdata(dev, data);
+ data->dev = dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return -EINVAL;
+ data->regs = devm_ioremap(dev, res->start, resource_size(res));
+ if (!data->regs)
+ return -ENOMEM;
+
+ davinci_mdio_init_clk(data);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, autosuspend_delay_ms);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* register the mii bus
+ * Create PHYs from DT only if PHY child nodes are explicitly defined,
+ * to support backward compatibility with DTs which assume that the
+ * Davinci MDIO driver will always scan the bus for PHY detection.
+ */
+ if (dev->of_node && of_get_child_count(dev->of_node))
+ data->skip_scan = true;
+
+ ret = of_mdiobus_register(data->bus, dev->of_node);
+ if (ret)
+ goto bail_out;
+
+ /* scan and dump the bus */
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
+ phy = mdiobus_get_phy(data->bus, addr);
+ if (phy) {
+ dev_info(dev, "phy[%d]: device %s, driver %s\n",
+ phy->mdio.addr, phydev_name(phy),
+ phy->drv ? phy->drv->name : "unknown");
+ }
+ }
+
+ return 0;
+
+bail_out:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+}
+
+static int davinci_mdio_remove(struct platform_device *pdev)
+{
+ struct davinci_mdio_data *data = platform_get_drvdata(pdev);
+
+ if (data->bus) {
+ mdiobus_unregister(data->bus);
+
+ if (data->manual_mode)
+ free_mdio_bitbang(data->bus);
+ }
+
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int davinci_mdio_runtime_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ u32 ctrl;
+
+ /* shutdown the scan state machine */
+ ctrl = readl(&data->regs->control);
+ ctrl &= ~CONTROL_ENABLE;
+ writel(ctrl, &data->regs->control);
+
+ if (!data->manual_mode)
+ wait_for_idle(data);
+
+ return 0;
+}
+
+static int davinci_mdio_runtime_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ if (data->manual_mode) {
+ davinci_mdio_disable(data);
+ davinci_mdio_enable_manual_mode(data);
+ } else {
+ davinci_mdio_enable(data);
+ }
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int davinci_mdio_suspend(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+ int ret = 0;
+
+ data->active_in_suspend = !pm_runtime_status_suspended(dev);
+ if (data->active_in_suspend)
+ ret = pm_runtime_force_suspend(dev);
+ if (ret < 0)
+ return ret;
+
+ /* Select sleep pin state */
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int davinci_mdio_resume(struct device *dev)
+{
+ struct davinci_mdio_data *data = dev_get_drvdata(dev);
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(dev);
+
+ if (data->active_in_suspend)
+ pm_runtime_force_resume(dev);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops davinci_mdio_pm_ops = {
+ SET_RUNTIME_PM_OPS(davinci_mdio_runtime_suspend,
+ davinci_mdio_runtime_resume, NULL)
+ SET_LATE_SYSTEM_SLEEP_PM_OPS(davinci_mdio_suspend, davinci_mdio_resume)
+};
+
+static struct platform_driver davinci_mdio_driver = {
+ .driver = {
+ .name = "davinci_mdio",
+ .pm = &davinci_mdio_pm_ops,
+ .of_match_table = of_match_ptr(davinci_mdio_of_mtable),
+ },
+ .probe = davinci_mdio_probe,
+ .remove = davinci_mdio_remove,
+};
+
+static int __init davinci_mdio_init(void)
+{
+ return platform_driver_register(&davinci_mdio_driver);
+}
+device_initcall(davinci_mdio_init);
+
+static void __exit davinci_mdio_exit(void)
+{
+ platform_driver_unregister(&davinci_mdio_driver);
+}
+module_exit(davinci_mdio_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("DaVinci MDIO driver");
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.c b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
new file mode 100644
index 000000000..38cc12f9f
--- /dev/null
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: GPL-2.0
+/* TI K3 CPPI5 descriptors pool API
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/kernel.h>
+
+#include "k3-cppi-desc-pool.h"
+
+struct k3_cppi_desc_pool {
+ struct device *dev;
+ dma_addr_t dma_addr;
+ void *cpumem; /* dma_alloc map */
+ size_t desc_size;
+ size_t mem_size;
+ size_t num_desc;
+ struct gen_pool *gen_pool;
+};
+
+void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool)
+{
+ if (!pool)
+ return;
+
+ WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
+ "k3_knav_desc_pool size %zu != avail %zu",
+ gen_pool_size(pool->gen_pool),
+ gen_pool_avail(pool->gen_pool));
+ if (pool->cpumem)
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
+ pool->dma_addr);
+
+ gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+}
+
+struct k3_cppi_desc_pool *
+k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
+ size_t desc_size,
+ const char *name)
+{
+ struct k3_cppi_desc_pool *pool;
+ const char *pool_name = NULL;
+ int ret = -ENOMEM;
+
+ pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(ret);
+
+ pool->dev = dev;
+ pool->desc_size = roundup_pow_of_two(desc_size);
+ pool->num_desc = size;
+ pool->mem_size = pool->num_desc * pool->desc_size;
+
+ pool_name = kstrdup_const(name ? name : dev_name(pool->dev),
+ GFP_KERNEL);
+ if (!pool_name)
+ return ERR_PTR(-ENOMEM);
+
+ pool->gen_pool = gen_pool_create(ilog2(pool->desc_size), -1);
+ if (!pool->gen_pool) {
+ ret = -ENOMEM;
+ dev_err(pool->dev, "pool create failed %d\n", ret);
+ kfree_const(pool_name);
+ goto gen_pool_create_fail;
+ }
+
+ pool->gen_pool->name = pool_name;
+
+ pool->cpumem = dma_alloc_coherent(pool->dev, pool->mem_size,
+ &pool->dma_addr, GFP_KERNEL);
+
+ if (!pool->cpumem)
+ goto dma_alloc_fail;
+
+ ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->cpumem,
+ (phys_addr_t)pool->dma_addr, pool->mem_size,
+ -1);
+ if (ret < 0) {
+ dev_err(pool->dev, "pool add failed %d\n", ret);
+ goto gen_pool_add_virt_fail;
+ }
+
+ return pool;
+
+gen_pool_add_virt_fail:
+ dma_free_coherent(pool->dev, pool->mem_size, pool->cpumem,
+ pool->dma_addr);
+dma_alloc_fail:
+ gen_pool_destroy(pool->gen_pool); /* frees pool->name */
+gen_pool_create_fail:
+ devm_kfree(pool->dev, pool);
+ return ERR_PTR(ret);
+}
+
+dma_addr_t k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool,
+ void *addr)
+{
+ return addr ? pool->dma_addr + (addr - pool->cpumem) : 0;
+}
+
+void *k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma)
+{
+ return dma ? pool->cpumem + (dma - pool->dma_addr) : NULL;
+}
+
+void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool)
+{
+ return (void *)gen_pool_alloc(pool->gen_pool, pool->desc_size);
+}
+
+void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr)
+{
+ gen_pool_free(pool->gen_pool, (unsigned long)addr, pool->desc_size);
+}
+
+size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool)
+{
+ return gen_pool_avail(pool->gen_pool) / pool->desc_size;
+}
diff --git a/drivers/net/ethernet/ti/k3-cppi-desc-pool.h b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
new file mode 100644
index 000000000..a7e3fa5e7
--- /dev/null
+++ b/drivers/net/ethernet/ti/k3-cppi-desc-pool.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* TI K3 CPPI5 descriptors pool
+ *
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_CPPI_DESC_POOL_H_
+#define K3_CPPI_DESC_POOL_H_
+
+#include <linux/device.h>
+#include <linux/types.h>
+
+struct k3_cppi_desc_pool;
+
+void k3_cppi_desc_pool_destroy(struct k3_cppi_desc_pool *pool);
+struct k3_cppi_desc_pool *
+k3_cppi_desc_pool_create_name(struct device *dev, size_t size,
+ size_t desc_size,
+ const char *name);
+#define k3_cppi_desc_pool_create(dev, size, desc_size) \
+ k3_cppi_desc_pool_create_name(dev, size, desc_size, NULL)
+dma_addr_t
+k3_cppi_desc_pool_virt2dma(struct k3_cppi_desc_pool *pool, void *addr);
+void *
+k3_cppi_desc_pool_dma2virt(struct k3_cppi_desc_pool *pool, dma_addr_t dma);
+void *k3_cppi_desc_pool_alloc(struct k3_cppi_desc_pool *pool);
+void k3_cppi_desc_pool_free(struct k3_cppi_desc_pool *pool, void *addr);
+size_t k3_cppi_desc_pool_avail(struct k3_cppi_desc_pool *pool);
+
+#endif /* K3_CPPI_DESC_POOL_H_ */
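+
+/* Illustrative usage sketch (editor's addition, not part of the upstream
+ * file): how a hypothetical caller could create a descriptor pool, allocate
+ * one descriptor, translate it for the hardware and tear everything down.
+ * MY_DESC_SIZE, my_setup_pool() and my_dev are placeholders, and
+ * <linux/err.h> is assumed to be included by the caller.
+ */
+#if 0 /* example only */
+#define MY_DESC_SIZE 128 /* placeholder; the pool rounds it up to a power of two */
+
+static int my_setup_pool(struct device *my_dev)
+{
+ struct k3_cppi_desc_pool *pool;
+ void *desc;
+
+ pool = k3_cppi_desc_pool_create_name(my_dev, 128, MY_DESC_SIZE,
+ "my-tx-descs");
+ if (IS_ERR(pool))
+ return PTR_ERR(pool);
+
+ desc = k3_cppi_desc_pool_alloc(pool); /* NULL when the pool is exhausted */
+ if (desc) {
+ dma_addr_t desc_dma = k3_cppi_desc_pool_virt2dma(pool, desc);
+
+ /* ... fill the descriptor and hand desc_dma to the DMA channel ... */
+ k3_cppi_desc_pool_free(pool, desc);
+ }
+
+ k3_cppi_desc_pool_destroy(pool);
+ return 0;
+}
+#endif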
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
new file mode 100644
index 000000000..43d5cd59b
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -0,0 +1,248 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * NetCP driver local header
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ * Murali Karicheri <m-karicheri2@ti.com>
+ */
+#ifndef __NETCP_H__
+#define __NETCP_H__
+
+#include <linux/netdevice.h>
+#include <linux/soc/ti/knav_dma.h>
+#include <linux/u64_stats_sync.h>
+
+/* Maximum Ethernet frame size supported by Keystone switch */
+#define NETCP_MAX_FRAME_SIZE 9504
+
+#define SGMII_LINK_MAC_MAC_AUTONEG 0
+#define SGMII_LINK_MAC_PHY 1
+#define SGMII_LINK_MAC_MAC_FORCED 2
+#define SGMII_LINK_MAC_FIBER 3
+#define SGMII_LINK_MAC_PHY_NO_MDIO 4
+#define RGMII_LINK_MAC_PHY 5
+#define RGMII_LINK_MAC_PHY_NO_MDIO 7
+#define XGMII_LINK_MAC_PHY 10
+#define XGMII_LINK_MAC_MAC_FORCED 11
+
+struct netcp_device;
+
+struct netcp_tx_pipe {
+ struct netcp_device *netcp_device;
+ void *dma_queue;
+ unsigned int dma_queue_id;
+ /* Destination port for packets forwarded to the switch. Used only by ethss */
+ u8 switch_to_port;
+#define SWITCH_TO_PORT_IN_TAGINFO BIT(0)
+ u8 flags;
+ void *dma_channel;
+ const char *dma_chan_name;
+};
+
+#define ADDR_NEW BIT(0)
+#define ADDR_VALID BIT(1)
+
+enum netcp_addr_type {
+ ADDR_ANY,
+ ADDR_DEV,
+ ADDR_UCAST,
+ ADDR_MCAST,
+ ADDR_BCAST
+};
+
+struct netcp_addr {
+ struct netcp_intf *netcp;
+ unsigned char addr[ETH_ALEN];
+ enum netcp_addr_type type;
+ unsigned int flags;
+ struct list_head node;
+};
+
+struct netcp_stats {
+ struct u64_stats_sync syncp_rx ____cacheline_aligned_in_smp;
+ u64 rx_packets;
+ u64 rx_bytes;
+ u32 rx_errors;
+ u32 rx_dropped;
+
+ struct u64_stats_sync syncp_tx ____cacheline_aligned_in_smp;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u32 tx_errors;
+ u32 tx_dropped;
+};
+
+struct netcp_intf {
+ struct device *dev;
+ struct device *ndev_dev;
+ struct net_device *ndev;
+ bool big_endian;
+ unsigned int tx_compl_qid;
+ void *tx_pool;
+ struct list_head txhook_list_head;
+ unsigned int tx_pause_threshold;
+ void *tx_compl_q;
+
+ unsigned int tx_resume_threshold;
+ void *rx_queue;
+ void *rx_pool;
+ struct list_head rxhook_list_head;
+ unsigned int rx_queue_id;
+ void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
+ struct napi_struct rx_napi;
+ struct napi_struct tx_napi;
+#define ETH_SW_CAN_REMOVE_ETH_FCS BIT(0)
+ u32 hw_cap;
+
+ /* 64-bit netcp stats */
+ struct netcp_stats stats;
+
+ void *rx_channel;
+ const char *dma_chan_name;
+ u32 rx_pool_size;
+ u32 rx_pool_region_id;
+ u32 tx_pool_size;
+ u32 tx_pool_region_id;
+ struct list_head module_head;
+ struct list_head interface_list;
+ struct list_head addr_list;
+ bool netdev_registered;
+ bool primary_module_attached;
+
+ /* Lock used for protecting Rx/Tx hook list management */
+ spinlock_t lock;
+ struct netcp_device *netcp_device;
+ struct device_node *node_interface;
+
+ /* DMA configuration data */
+ u32 msg_enable;
+ u32 rx_queue_depths[KNAV_DMA_FDQ_PER_CHAN];
+};
+
+#define NETCP_PSDATA_LEN KNAV_DMA_NUM_PS_WORDS
+struct netcp_packet {
+ struct sk_buff *skb;
+ __le32 *epib;
+ u32 *psdata;
+ u32 eflags;
+ unsigned int psdata_len;
+ struct netcp_intf *netcp;
+ struct netcp_tx_pipe *tx_pipe;
+ bool rxtstamp_complete;
+ void *ts_context;
+
+ void (*txtstamp)(void *ctx, struct sk_buff *skb);
+};
+
+static inline u32 *netcp_push_psdata(struct netcp_packet *p_info,
+ unsigned int bytes)
+{
+ u32 *buf;
+ unsigned int words;
+
+ if ((bytes & 0x03) != 0)
+ return NULL;
+ words = bytes >> 2;
+
+ if ((p_info->psdata_len + words) > NETCP_PSDATA_LEN)
+ return NULL;
+
+ p_info->psdata_len += words;
+ buf = &p_info->psdata[NETCP_PSDATA_LEN - p_info->psdata_len];
+ return buf;
+}
+
+static inline int netcp_align_psdata(struct netcp_packet *p_info,
+ unsigned int byte_align)
+{
+ int padding;
+
+ switch (byte_align) {
+ case 0:
+ padding = -EINVAL;
+ break;
+ case 1:
+ case 2:
+ case 4:
+ padding = 0;
+ break;
+ case 8:
+ padding = (p_info->psdata_len << 2) % 8;
+ break;
+ case 16:
+ padding = (p_info->psdata_len << 2) % 16;
+ break;
+ default:
+ padding = (p_info->psdata_len << 2) % byte_align;
+ break;
+ }
+ return padding;
+}
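+
+/* Illustrative sketch (editor's addition, not part of the upstream file):
+ * a hypothetical TX hook reserving two psdata words for per-packet command
+ * data. netcp_push_psdata() hands out words from the tail of the fixed
+ * KNAV_DMA_NUM_PS_WORDS area and returns NULL when the request is not a
+ * multiple of 4 bytes or does not fit. my_tx_hook() is a placeholder name.
+ */
+#if 0 /* example only */
+static int my_tx_hook(int order, void *data, struct netcp_packet *p_info)
+{
+ u32 *psdata = netcp_push_psdata(p_info, 2 * sizeof(u32));
+
+ if (!psdata)
+ return -ENOMEM;
+
+ psdata[0] = 0; /* e.g. a command word consumed by the accelerator */
+ psdata[1] = 0; /* e.g. a per-packet flag word */
+ return 0;
+}
+#endif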
+
+struct netcp_module {
+ const char *name;
+ struct module *owner;
+ bool primary;
+
+ /* probe/remove: called once per NETCP instance */
+ int (*probe)(struct netcp_device *netcp_device,
+ struct device *device, struct device_node *node,
+ void **inst_priv);
+ int (*remove)(struct netcp_device *netcp_device, void *inst_priv);
+
+ /* attach/release: called once per network interface */
+ int (*attach)(void *inst_priv, struct net_device *ndev,
+ struct device_node *node, void **intf_priv);
+ int (*release)(void *intf_priv);
+ int (*open)(void *intf_priv, struct net_device *ndev);
+ int (*close)(void *intf_priv, struct net_device *ndev);
+ int (*add_addr)(void *intf_priv, struct netcp_addr *naddr);
+ int (*del_addr)(void *intf_priv, struct netcp_addr *naddr);
+ int (*add_vid)(void *intf_priv, int vid);
+ int (*del_vid)(void *intf_priv, int vid);
+ int (*ioctl)(void *intf_priv, struct ifreq *req, int cmd);
+ int (*set_rx_mode)(void *intf_priv, bool promisc);
+
+ /* used internally */
+ struct list_head module_list;
+ struct list_head interface_list;
+};
+
+int netcp_register_module(struct netcp_module *module);
+void netcp_unregister_module(struct netcp_module *module);
+void *netcp_module_get_intf_data(struct netcp_module *module,
+ struct netcp_intf *intf);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+ struct netcp_device *netcp_device,
+ const char *dma_chan_name, unsigned int dma_queue_id);
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe);
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe);
+
+typedef int netcp_hook_rtn(int order, void *data, struct netcp_packet *packet);
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data);
+void *netcp_device_find_module(struct netcp_device *netcp_device,
+ const char *name);
+
+/* SGMII functions */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
+
+/* XGBE SERDES init functions */
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs);
+
+#endif /* __NETCP_H__ */
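+
+/* Illustrative sketch (editor's addition, not part of the upstream file):
+ * the minimal shape of a NetCP module built on this API. Real modules (for
+ * example the Ethernet switch module in netcp_ethss.c) implement these
+ * callbacks; every my_* name below is a placeholder, and <linux/module.h>
+ * is assumed for THIS_MODULE/module_init().
+ */
+#if 0 /* example only */
+static int my_probe(struct netcp_device *netcp_device, struct device *dev,
+ struct device_node *node, void **inst_priv)
+{
+ /* allocate per-instance state and return it via *inst_priv */
+ *inst_priv = NULL;
+ return 0;
+}
+
+static int my_remove(struct netcp_device *netcp_device, void *inst_priv)
+{
+ return 0;
+}
+
+static int my_attach(void *inst_priv, struct net_device *ndev,
+ struct device_node *node, void **intf_priv)
+{
+ /* bind the module to one network interface */
+ *intf_priv = NULL;
+ return 0;
+}
+
+static int my_release(void *intf_priv)
+{
+ return 0;
+}
+
+static struct netcp_module my_module = {
+ .name = "my-module", /* matched against "netcp-devices" child node labels */
+ .owner = THIS_MODULE,
+ .primary = true,
+ .probe = my_probe,
+ .remove = my_remove,
+ .attach = my_attach,
+ .release = my_release,
+};
+
+static int __init my_module_init(void)
+{
+ return netcp_register_module(&my_module);
+}
+module_init(my_module_init);
+#endif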
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
new file mode 100644
index 000000000..9eb9eaff4
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -0,0 +1,2280 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Keystone NetCP Core driver
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Murali Karicheri <m-karicheri2@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/pm_runtime.h>
+#include <linux/platform_device.h>
+#include <linux/soc/ti/knav_qmss.h>
+#include <linux/soc/ti/knav_dma.h>
+
+#include "netcp.h"
+
+#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
+#define NETCP_TX_TIMEOUT (5 * HZ)
+#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
+#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
+#define NETCP_MAX_MCAST_ADDR 16
+
+#define NETCP_EFUSE_REG_INDEX 0
+
+#define NETCP_MOD_PROBE_SKIPPED 1
+#define NETCP_MOD_PROBE_FAILED 2
+
+#define NETCP_DEBUG (NETIF_MSG_HW | NETIF_MSG_WOL | \
+ NETIF_MSG_DRV | NETIF_MSG_LINK | \
+ NETIF_MSG_IFUP | NETIF_MSG_INTR | \
+ NETIF_MSG_PROBE | NETIF_MSG_TIMER | \
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR | \
+ NETIF_MSG_TX_ERR | NETIF_MSG_TX_DONE | \
+ NETIF_MSG_PKTDATA | NETIF_MSG_TX_QUEUED | \
+ NETIF_MSG_RX_STATUS)
+
+#define NETCP_EFUSE_ADDR_SWAP 2
+
+#define knav_queue_get_id(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_GET_ID, (unsigned long)NULL)
+
+#define knav_queue_enable_notify(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_ENABLE_NOTIFY, \
+ (unsigned long)NULL)
+
+#define knav_queue_disable_notify(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_DISABLE_NOTIFY, \
+ (unsigned long)NULL)
+
+#define knav_queue_get_count(q) knav_queue_device_control(q, \
+ KNAV_QUEUE_GET_COUNT, (unsigned long)NULL)
+
+#define for_each_netcp_module(module) \
+ list_for_each_entry(module, &netcp_modules, module_list)
+
+#define for_each_netcp_device_module(netcp_device, inst_modpriv) \
+ list_for_each_entry(inst_modpriv, \
+ &((netcp_device)->modpriv_head), inst_list)
+
+#define for_each_module(netcp, intf_modpriv) \
+ list_for_each_entry(intf_modpriv, &netcp->module_head, intf_list)
+
+/* Module management structures */
+struct netcp_device {
+ struct list_head device_list;
+ struct list_head interface_head;
+ struct list_head modpriv_head;
+ struct device *device;
+};
+
+struct netcp_inst_modpriv {
+ struct netcp_device *netcp_device;
+ struct netcp_module *netcp_module;
+ struct list_head inst_list;
+ void *module_priv;
+};
+
+struct netcp_intf_modpriv {
+ struct netcp_intf *netcp_priv;
+ struct netcp_module *netcp_module;
+ struct list_head intf_list;
+ void *module_priv;
+};
+
+struct netcp_tx_cb {
+ void *ts_context;
+ void (*txtstamp)(void *context, struct sk_buff *skb);
+};
+
+static LIST_HEAD(netcp_devices);
+static LIST_HEAD(netcp_modules);
+static DEFINE_MUTEX(netcp_modules_lock);
+
+static int netcp_debug_level = -1;
+module_param(netcp_debug_level, int, 0);
+MODULE_PARM_DESC(netcp_debug_level, "Netcp debug level (NETIF_MSG bits) (0=none,...,16=all)");
+
+/* Helper functions - Get/Set */
+static void get_pkt_info(dma_addr_t *buff, u32 *buff_len, dma_addr_t *ndesc,
+ struct knav_dma_desc *desc)
+{
+ *buff_len = le32_to_cpu(desc->buff_len);
+ *buff = le32_to_cpu(desc->buff);
+ *ndesc = le32_to_cpu(desc->next_desc);
+}
+
+static void get_desc_info(u32 *desc_info, u32 *pkt_info,
+ struct knav_dma_desc *desc)
+{
+ *desc_info = le32_to_cpu(desc->desc_info);
+ *pkt_info = le32_to_cpu(desc->packet_info);
+}
+
+static u32 get_sw_data(int index, struct knav_dma_desc *desc)
+{
+ /* No Endian conversion needed as this data is untouched by hw */
+ return desc->sw_data[index];
+}
+
+/* use these macros to get sw data */
+#define GET_SW_DATA0(desc) get_sw_data(0, desc)
+#define GET_SW_DATA1(desc) get_sw_data(1, desc)
+#define GET_SW_DATA2(desc) get_sw_data(2, desc)
+#define GET_SW_DATA3(desc) get_sw_data(3, desc)
+
+static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
+ struct knav_dma_desc *desc)
+{
+ *buff = le32_to_cpu(desc->orig_buff);
+ *buff_len = le32_to_cpu(desc->orig_len);
+}
+
+static void get_words(dma_addr_t *words, int num_words, __le32 *desc)
+{
+ int i;
+
+ for (i = 0; i < num_words; i++)
+ words[i] = le32_to_cpu(desc[i]);
+}
+
+static void set_pkt_info(dma_addr_t buff, u32 buff_len, u32 ndesc,
+ struct knav_dma_desc *desc)
+{
+ desc->buff_len = cpu_to_le32(buff_len);
+ desc->buff = cpu_to_le32(buff);
+ desc->next_desc = cpu_to_le32(ndesc);
+}
+
+static void set_desc_info(u32 desc_info, u32 pkt_info,
+ struct knav_dma_desc *desc)
+{
+ desc->desc_info = cpu_to_le32(desc_info);
+ desc->packet_info = cpu_to_le32(pkt_info);
+}
+
+static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
+{
+ /* No Endian conversion needed as this data is untouched by hw */
+ desc->sw_data[index] = data;
+}
+
+/* use these macros to set sw data */
+#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
+#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
+#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
+#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
+
+static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
+ struct knav_dma_desc *desc)
+{
+ desc->orig_buff = cpu_to_le32(buff);
+ desc->orig_len = cpu_to_le32(buff_len);
+}
+
+static void set_words(u32 *words, int num_words, __le32 *desc)
+{
+ int i;
+
+ for (i = 0; i < num_words; i++)
+ desc[i] = cpu_to_le32(words[i]);
+}
+
+/* Read the e-fuse MAC address as two 32 bit words so the result is endian independent */
+static int emac_arch_get_mac_addr(char *x, void __iomem *efuse_mac, u32 swap)
+{
+ unsigned int addr0, addr1;
+
+ addr1 = readl(efuse_mac + 4);
+ addr0 = readl(efuse_mac);
+
+ switch (swap) {
+ case NETCP_EFUSE_ADDR_SWAP:
+ addr0 = addr1;
+ addr1 = readl(efuse_mac);
+ break;
+ default:
+ break;
+ }
+
+ x[0] = (addr1 & 0x0000ff00) >> 8;
+ x[1] = addr1 & 0x000000ff;
+ x[2] = (addr0 & 0xff000000) >> 24;
+ x[3] = (addr0 & 0x00ff0000) >> 16;
+ x[4] = (addr0 & 0x0000ff00) >> 8;
+ x[5] = addr0 & 0x000000ff;
+
+ return 0;
+}
+
+/* Module management routines */
+static int netcp_register_interface(struct netcp_intf *netcp)
+{
+ int ret;
+
+ ret = register_netdev(netcp->ndev);
+ if (!ret)
+ netcp->netdev_registered = true;
+ return ret;
+}
+
+static int netcp_module_probe(struct netcp_device *netcp_device,
+ struct netcp_module *module)
+{
+ struct device *dev = netcp_device->device;
+ struct device_node *devices, *interface, *node = dev->of_node;
+ struct device_node *child;
+ struct netcp_inst_modpriv *inst_modpriv;
+ struct netcp_intf *netcp_intf;
+ struct netcp_module *tmp;
+ bool primary_module_registered = false;
+ int ret;
+
+ /* Find this module in the sub-tree for this device */
+ devices = of_get_child_by_name(node, "netcp-devices");
+ if (!devices) {
+ dev_err(dev, "could not find netcp-devices node\n");
+ return NETCP_MOD_PROBE_SKIPPED;
+ }
+
+ for_each_available_child_of_node(devices, child) {
+ const char *name;
+ char node_name[32];
+
+ if (of_property_read_string(child, "label", &name) < 0) {
+ snprintf(node_name, sizeof(node_name), "%pOFn", child);
+ name = node_name;
+ }
+ if (!strcasecmp(module->name, name))
+ break;
+ }
+
+ of_node_put(devices);
+ /* If module not used for this device, skip it */
+ if (!child) {
+ dev_warn(dev, "module(%s) not used for device\n", module->name);
+ return NETCP_MOD_PROBE_SKIPPED;
+ }
+
+ inst_modpriv = devm_kzalloc(dev, sizeof(*inst_modpriv), GFP_KERNEL);
+ if (!inst_modpriv) {
+ of_node_put(child);
+ return -ENOMEM;
+ }
+
+ inst_modpriv->netcp_device = netcp_device;
+ inst_modpriv->netcp_module = module;
+ list_add_tail(&inst_modpriv->inst_list, &netcp_device->modpriv_head);
+
+ ret = module->probe(netcp_device, dev, child,
+ &inst_modpriv->module_priv);
+ of_node_put(child);
+ if (ret) {
+ dev_err(dev, "Probe of module(%s) failed with %d\n",
+ module->name, ret);
+ list_del(&inst_modpriv->inst_list);
+ devm_kfree(dev, inst_modpriv);
+ return NETCP_MOD_PROBE_FAILED;
+ }
+
+ /* Attach modules only if the primary module is probed */
+ for_each_netcp_module(tmp) {
+ if (tmp->primary)
+ primary_module_registered = true;
+ }
+
+ if (!primary_module_registered)
+ return 0;
+
+ /* Attach module to interfaces */
+ list_for_each_entry(netcp_intf, &netcp_device->interface_head,
+ interface_list) {
+ struct netcp_intf_modpriv *intf_modpriv;
+
+ intf_modpriv = devm_kzalloc(dev, sizeof(*intf_modpriv),
+ GFP_KERNEL);
+ if (!intf_modpriv)
+ return -ENOMEM;
+
+ interface = of_parse_phandle(netcp_intf->node_interface,
+ module->name, 0);
+
+ if (!interface) {
+ devm_kfree(dev, intf_modpriv);
+ continue;
+ }
+
+ intf_modpriv->netcp_priv = netcp_intf;
+ intf_modpriv->netcp_module = module;
+ list_add_tail(&intf_modpriv->intf_list,
+ &netcp_intf->module_head);
+
+ ret = module->attach(inst_modpriv->module_priv,
+ netcp_intf->ndev, interface,
+ &intf_modpriv->module_priv);
+ of_node_put(interface);
+ if (ret) {
+ dev_dbg(dev, "Attach of module %s declined with %d\n",
+ module->name, ret);
+ list_del(&intf_modpriv->intf_list);
+ devm_kfree(dev, intf_modpriv);
+ continue;
+ }
+ }
+
+ /* Now register the interface with netdev */
+ list_for_each_entry(netcp_intf,
+ &netcp_device->interface_head,
+ interface_list) {
+ /* If interface not registered then register now */
+ if (!netcp_intf->netdev_registered) {
+ ret = netcp_register_interface(netcp_intf);
+ if (ret)
+ return -ENODEV;
+ }
+ }
+ return 0;
+}
+
+int netcp_register_module(struct netcp_module *module)
+{
+ struct netcp_device *netcp_device;
+ struct netcp_module *tmp;
+ int ret;
+
+ if (!module->name) {
+ WARN(1, "error registering netcp module: no name\n");
+ return -EINVAL;
+ }
+
+ if (!module->probe) {
+ WARN(1, "error registering netcp module: no probe\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&netcp_modules_lock);
+
+ for_each_netcp_module(tmp) {
+ if (!strcasecmp(tmp->name, module->name)) {
+ mutex_unlock(&netcp_modules_lock);
+ return -EEXIST;
+ }
+ }
+ list_add_tail(&module->module_list, &netcp_modules);
+
+ list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+ ret = netcp_module_probe(netcp_device, module);
+ if (ret < 0)
+ goto fail;
+ }
+ mutex_unlock(&netcp_modules_lock);
+ return 0;
+
+fail:
+ mutex_unlock(&netcp_modules_lock);
+ netcp_unregister_module(module);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_register_module);
+
+static void netcp_release_module(struct netcp_device *netcp_device,
+ struct netcp_module *module)
+{
+ struct netcp_inst_modpriv *inst_modpriv, *inst_tmp;
+ struct netcp_intf *netcp_intf, *netcp_tmp;
+ struct device *dev = netcp_device->device;
+
+ /* Release the module from each interface */
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ struct netcp_intf_modpriv *intf_modpriv, *intf_tmp;
+
+ list_for_each_entry_safe(intf_modpriv, intf_tmp,
+ &netcp_intf->module_head,
+ intf_list) {
+ if (intf_modpriv->netcp_module == module) {
+ module->release(intf_modpriv->module_priv);
+ list_del(&intf_modpriv->intf_list);
+ devm_kfree(dev, intf_modpriv);
+ break;
+ }
+ }
+ }
+
+ /* Remove the module from each instance */
+ list_for_each_entry_safe(inst_modpriv, inst_tmp,
+ &netcp_device->modpriv_head, inst_list) {
+ if (inst_modpriv->netcp_module == module) {
+ module->remove(netcp_device,
+ inst_modpriv->module_priv);
+ list_del(&inst_modpriv->inst_list);
+ devm_kfree(dev, inst_modpriv);
+ break;
+ }
+ }
+}
+
+void netcp_unregister_module(struct netcp_module *module)
+{
+ struct netcp_device *netcp_device;
+ struct netcp_module *module_tmp;
+
+ mutex_lock(&netcp_modules_lock);
+
+ list_for_each_entry(netcp_device, &netcp_devices, device_list) {
+ netcp_release_module(netcp_device, module);
+ }
+
+ /* Remove the module from the module list */
+ for_each_netcp_module(module_tmp) {
+ if (module == module_tmp) {
+ list_del(&module->module_list);
+ break;
+ }
+ }
+
+ mutex_unlock(&netcp_modules_lock);
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_module);
+
+void *netcp_module_get_intf_data(struct netcp_module *module,
+ struct netcp_intf *intf)
+{
+ struct netcp_intf_modpriv *intf_modpriv;
+
+ list_for_each_entry(intf_modpriv, &intf->module_head, intf_list)
+ if (intf_modpriv->netcp_module == module)
+ return intf_modpriv->module_priv;
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(netcp_module_get_intf_data);
+
+/* Module TX and RX Hook management */
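+/* Hooks are kept sorted by ascending 'order'; lower-order hooks run first. */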
+struct netcp_hook_list {
+ struct list_head list;
+ netcp_hook_rtn *hook_rtn;
+ void *hook_data;
+ int order;
+};
+
+int netcp_register_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *entry;
+ struct netcp_hook_list *next;
+ unsigned long flags;
+
+ entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->hook_rtn = hook_rtn;
+ entry->hook_data = hook_data;
+ entry->order = order;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry(next, &netcp_priv->txhook_list_head, list) {
+ if (next->order > order)
+ break;
+ }
+ __list_add(&entry->list, next->list.prev, &next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_register_txhook);
+
+int netcp_unregister_txhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *next, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry_safe(next, n, &netcp_priv->txhook_list_head, list) {
+ if ((next->order == order) &&
+ (next->hook_rtn == hook_rtn) &&
+ (next->hook_data == hook_data)) {
+ list_del(&next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+ devm_kfree(netcp_priv->dev, next);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_txhook);
+
+int netcp_register_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *entry;
+ struct netcp_hook_list *next;
+ unsigned long flags;
+
+ entry = devm_kzalloc(netcp_priv->dev, sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ entry->hook_rtn = hook_rtn;
+ entry->hook_data = hook_data;
+ entry->order = order;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry(next, &netcp_priv->rxhook_list_head, list) {
+ if (next->order > order)
+ break;
+ }
+ __list_add(&entry->list, next->list.prev, &next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_register_rxhook);
+
+int netcp_unregister_rxhook(struct netcp_intf *netcp_priv, int order,
+ netcp_hook_rtn *hook_rtn, void *hook_data)
+{
+ struct netcp_hook_list *next, *n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&netcp_priv->lock, flags);
+ list_for_each_entry_safe(next, n, &netcp_priv->rxhook_list_head, list) {
+ if ((next->order == order) &&
+ (next->hook_rtn == hook_rtn) &&
+ (next->hook_data == hook_data)) {
+ list_del(&next->list);
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+ devm_kfree(netcp_priv->dev, next);
+ return 0;
+ }
+ }
+ spin_unlock_irqrestore(&netcp_priv->lock, flags);
+
+ return -ENOENT;
+}
+EXPORT_SYMBOL_GPL(netcp_unregister_rxhook);
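+
+/* Illustrative sketch (editor's addition, not part of the upstream file):
+ * registering a hypothetical RX hook when an interface opens and removing
+ * it again on close. The order value (0 here) only matters relative to
+ * other hooks on the same interface; my_* names are placeholders.
+ */
+#if 0 /* example only */
+static int my_rx_hook(int order, void *data, struct netcp_packet *p_info)
+{
+ /* inspect p_info->skb / p_info->psdata; return 0 to keep the packet,
+ * non-zero to have the core drop it
+ */
+ return 0;
+}
+
+static int my_if_open(void *intf_priv, struct net_device *ndev)
+{
+ return netcp_register_rxhook(netdev_priv(ndev), 0, my_rx_hook,
+ intf_priv);
+}
+
+static int my_if_close(void *intf_priv, struct net_device *ndev)
+{
+ return netcp_unregister_rxhook(netdev_priv(ndev), 0, my_rx_hook,
+ intf_priv);
+}
+#endif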
+
+static void netcp_frag_free(bool is_frag, void *ptr)
+{
+ if (is_frag)
+ skb_free_frag(ptr);
+ else
+ kfree(ptr);
+}
+
+static void netcp_free_rx_desc_chain(struct netcp_intf *netcp,
+ struct knav_dma_desc *desc)
+{
+ struct knav_dma_desc *ndesc;
+ dma_addr_t dma_desc, dma_buf;
+ unsigned int buf_len, dma_sz = sizeof(*ndesc);
+ void *buf_ptr;
+ u32 tmp;
+
+ get_words(&dma_desc, 1, &desc->next_desc);
+
+ while (dma_desc) {
+ ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+ if (unlikely(!ndesc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ break;
+ }
+ get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ buf_ptr = (void *)GET_SW_DATA0(ndesc);
+ buf_len = (int)GET_SW_DATA1(ndesc);
+ dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
+ __free_page(buf_ptr);
+ knav_pool_desc_put(netcp->rx_pool, ndesc);
+ }
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ buf_ptr = (void *)GET_SW_DATA0(desc);
+ buf_len = (int)GET_SW_DATA1(desc);
+
+ if (buf_ptr)
+ netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
+ knav_pool_desc_put(netcp->rx_pool, desc);
+}
+
+static void netcp_empty_rx_queue(struct netcp_intf *netcp)
+{
+ struct netcp_stats *rx_stats = &netcp->stats;
+ struct knav_dma_desc *desc;
+ unsigned int dma_sz;
+ dma_addr_t dma;
+
+ for (; ;) {
+ dma = knav_queue_pop(netcp->rx_queue, &dma_sz);
+ if (!dma)
+ break;
+
+ desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "%s: failed to unmap Rx desc\n",
+ __func__);
+ rx_stats->rx_errors++;
+ continue;
+ }
+ netcp_free_rx_desc_chain(netcp, desc);
+ rx_stats->rx_dropped++;
+ }
+}
+
+static int netcp_process_one_rx_packet(struct netcp_intf *netcp)
+{
+ struct netcp_stats *rx_stats = &netcp->stats;
+ unsigned int dma_sz, buf_len, org_buf_len;
+ struct knav_dma_desc *desc, *ndesc;
+ unsigned int pkt_sz = 0, accum_sz;
+ struct netcp_hook_list *rx_hook;
+ dma_addr_t dma_desc, dma_buff;
+ struct netcp_packet p_info;
+ struct sk_buff *skb;
+ void *org_buf_ptr;
+ u32 tmp;
+
+ dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
+ if (!dma_desc)
+ return -1;
+
+ desc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ return 0;
+ }
+
+ get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ org_buf_ptr = (void *)GET_SW_DATA0(desc);
+ org_buf_len = (int)GET_SW_DATA1(desc);
+
+ if (unlikely(!org_buf_ptr)) {
+ dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+ goto free_desc;
+ }
+
+ pkt_sz &= KNAV_DMA_DESC_PKT_LEN_MASK;
+ accum_sz = buf_len;
+ dma_unmap_single(netcp->dev, dma_buff, buf_len, DMA_FROM_DEVICE);
+
+ /* Build a new sk_buff for the primary buffer */
+ skb = build_skb(org_buf_ptr, org_buf_len);
+ if (unlikely(!skb)) {
+ dev_err(netcp->ndev_dev, "build_skb() failed\n");
+ goto free_desc;
+ }
+
+ /* update data, tail and len */
+ skb_reserve(skb, NETCP_SOP_OFFSET);
+ __skb_put(skb, buf_len);
+
+ /* Fill in the page fragment list */
+ while (dma_desc) {
+ struct page *page;
+
+ ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
+ if (unlikely(!ndesc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ goto free_desc;
+ }
+
+ get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ page = (struct page *)GET_SW_DATA0(ndesc);
+
+ if (likely(dma_buff && buf_len && page)) {
+ dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ } else {
+ dev_err(netcp->ndev_dev, "Bad Rx desc dma_buff(%pad), len(%d), page(%p)\n",
+ &dma_buff, buf_len, page);
+ goto free_desc;
+ }
+
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ offset_in_page(dma_buff), buf_len, PAGE_SIZE);
+ accum_sz += buf_len;
+
+ /* Free the descriptor */
+ knav_pool_desc_put(netcp->rx_pool, ndesc);
+ }
+
+ /* check for packet len and warn */
+ if (unlikely(pkt_sz != accum_sz))
+ dev_dbg(netcp->ndev_dev, "mismatch in packet size(%d) & sum of fragments(%d)\n",
+ pkt_sz, accum_sz);
+
+ /* Newer versions of the Ethernet switch can trim the Ethernet FCS
+ * from the packet themselves, which is indicated in hw_cap, so trim
+ * it here only for older h/w.
+ */
+ if (!(netcp->hw_cap & ETH_SW_CAN_REMOVE_ETH_FCS))
+ __pskb_trim(skb, skb->len - ETH_FCS_LEN);
+
+ /* Call each of the RX hooks */
+ p_info.skb = skb;
+ skb->dev = netcp->ndev;
+ p_info.rxtstamp_complete = false;
+ get_desc_info(&tmp, &p_info.eflags, desc);
+ p_info.epib = desc->epib;
+ p_info.psdata = (u32 __force *)desc->psdata;
+ p_info.eflags = ((p_info.eflags >> KNAV_DMA_DESC_EFLAGS_SHIFT) &
+ KNAV_DMA_DESC_EFLAGS_MASK);
+ list_for_each_entry(rx_hook, &netcp->rxhook_list_head, list) {
+ int ret;
+
+ ret = rx_hook->hook_rtn(rx_hook->order, rx_hook->hook_data,
+ &p_info);
+ if (unlikely(ret)) {
+ dev_err(netcp->ndev_dev, "RX hook %d failed: %d\n",
+ rx_hook->order, ret);
+ /* Free the primary descriptor */
+ rx_stats->rx_dropped++;
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ dev_kfree_skb(skb);
+ return 0;
+ }
+ }
+ /* Free the primary descriptor */
+ knav_pool_desc_put(netcp->rx_pool, desc);
+
+ u64_stats_update_begin(&rx_stats->syncp_rx);
+ rx_stats->rx_packets++;
+ rx_stats->rx_bytes += skb->len;
+ u64_stats_update_end(&rx_stats->syncp_rx);
+
+ /* push skb up the stack */
+ skb->protocol = eth_type_trans(skb, netcp->ndev);
+ netif_receive_skb(skb);
+ return 0;
+
+free_desc:
+ netcp_free_rx_desc_chain(netcp, desc);
+ rx_stats->rx_errors++;
+ return 0;
+}
+
+static int netcp_process_rx_packets(struct netcp_intf *netcp,
+ unsigned int budget)
+{
+ int i;
+
+ for (i = 0; (i < budget) && !netcp_process_one_rx_packet(netcp); i++)
+ ;
+ return i;
+}
+
+/* Release descriptors and attached buffers from Rx FDQ */
+static void netcp_free_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+ struct knav_dma_desc *desc;
+ unsigned int buf_len, dma_sz;
+ dma_addr_t dma;
+ void *buf_ptr;
+
+ /* Pop all descriptors from this FDQ and free the attached buffers */
+ while ((dma = knav_queue_pop(netcp->rx_fdq[fdq], &dma_sz))) {
+ desc = knav_pool_desc_unmap(netcp->rx_pool, dma, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Rx desc\n");
+ continue;
+ }
+
+ get_org_pkt_info(&dma, &buf_len, desc);
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ buf_ptr = (void *)GET_SW_DATA0(desc);
+
+ if (unlikely(!dma)) {
+ dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ continue;
+ }
+
+ if (unlikely(!buf_ptr)) {
+ dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ continue;
+ }
+
+ if (fdq == 0) {
+ dma_unmap_single(netcp->dev, dma, buf_len,
+ DMA_FROM_DEVICE);
+ netcp_frag_free((buf_len <= PAGE_SIZE), buf_ptr);
+ } else {
+ dma_unmap_page(netcp->dev, dma, buf_len,
+ DMA_FROM_DEVICE);
+ __free_page(buf_ptr);
+ }
+
+ knav_pool_desc_put(netcp->rx_pool, desc);
+ }
+}
+
+static void netcp_rxpool_free(struct netcp_intf *netcp)
+{
+ int i;
+
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+ !IS_ERR_OR_NULL(netcp->rx_fdq[i]); i++)
+ netcp_free_rx_buf(netcp, i);
+
+ if (knav_pool_count(netcp->rx_pool) != netcp->rx_pool_size)
+ dev_err(netcp->ndev_dev, "Lost Rx (%d) descriptors\n",
+ netcp->rx_pool_size - knav_pool_count(netcp->rx_pool));
+
+ knav_pool_destroy(netcp->rx_pool);
+ netcp->rx_pool = NULL;
+}
+
+static int netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
+{
+ struct knav_dma_desc *hwdesc;
+ unsigned int buf_len, dma_sz;
+ u32 desc_info, pkt_info;
+ struct page *page;
+ dma_addr_t dma;
+ void *bufptr;
+ u32 sw_data[2];
+
+ /* Allocate descriptor */
+ hwdesc = knav_pool_desc_get(netcp->rx_pool);
+ if (IS_ERR_OR_NULL(hwdesc)) {
+ dev_dbg(netcp->ndev_dev, "out of rx pool desc\n");
+ return -ENOMEM;
+ }
+
+ if (likely(fdq == 0)) {
+ unsigned int primary_buf_len;
+ /* Allocate a primary receive queue entry */
+ buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
+ primary_buf_len = SKB_DATA_ALIGN(buf_len) +
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+
+ bufptr = netdev_alloc_frag(primary_buf_len);
+ sw_data[1] = primary_buf_len;
+
+ if (unlikely(!bufptr)) {
+ dev_warn_ratelimited(netcp->ndev_dev,
+ "Primary RX buffer alloc failed\n");
+ goto fail;
+ }
+ dma = dma_map_single(netcp->dev, bufptr, buf_len,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(netcp->dev, dma)))
+ goto fail;
+
+ /* warning!!!! We are saving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ sw_data[0] = (u32)bufptr;
+ } else {
+ /* Allocate a secondary receive queue entry */
+ page = alloc_page(GFP_ATOMIC | GFP_DMA);
+ if (unlikely(!page)) {
+ dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
+ goto fail;
+ }
+ buf_len = PAGE_SIZE;
+ dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
+ /* warning!!!! We are saving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ sw_data[0] = (u32)page;
+ sw_data[1] = 0;
+ }
+
+ desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
+ desc_info |= buf_len & KNAV_DMA_DESC_PKT_LEN_MASK;
+ pkt_info = KNAV_DMA_DESC_HAS_EPIB;
+ pkt_info |= KNAV_DMA_NUM_PS_WORDS << KNAV_DMA_DESC_PSLEN_SHIFT;
+ pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
+ KNAV_DMA_DESC_RETQ_SHIFT;
+ set_org_pkt_info(dma, buf_len, hwdesc);
+ SET_SW_DATA0(sw_data[0], hwdesc);
+ SET_SW_DATA1(sw_data[1], hwdesc);
+ set_desc_info(desc_info, pkt_info, hwdesc);
+
+ /* Push to FDQs */
+ knav_pool_desc_map(netcp->rx_pool, hwdesc, sizeof(*hwdesc), &dma,
+ &dma_sz);
+ knav_queue_push(netcp->rx_fdq[fdq], dma, sizeof(*hwdesc), 0);
+ return 0;
+
+fail:
+ knav_pool_desc_put(netcp->rx_pool, hwdesc);
+ return -ENOMEM;
+}
+
+/* Refill Rx FDQ with descriptors & attached buffers */
+static void netcp_rxpool_refill(struct netcp_intf *netcp)
+{
+ u32 fdq_deficit[KNAV_DMA_FDQ_PER_CHAN] = {0};
+ int i, ret = 0;
+
+ /* Calculate the FDQ deficit and refill */
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_fdq[i]; i++) {
+ fdq_deficit[i] = netcp->rx_queue_depths[i] -
+ knav_queue_get_count(netcp->rx_fdq[i]);
+
+ while (fdq_deficit[i]-- && !ret)
+ ret = netcp_allocate_rx_buf(netcp, i);
+ } /* end for fdqs */
+}
+
+/* NAPI poll */
+static int netcp_rx_poll(struct napi_struct *napi, int budget)
+{
+ struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+ rx_napi);
+ unsigned int packets;
+
+ packets = netcp_process_rx_packets(netcp, budget);
+
+ netcp_rxpool_refill(netcp);
+ if (packets < budget) {
+ napi_complete_done(&netcp->rx_napi, packets);
+ knav_queue_enable_notify(netcp->rx_queue);
+ }
+
+ return packets;
+}
+
+static void netcp_rx_notify(void *arg)
+{
+ struct netcp_intf *netcp = arg;
+
+ knav_queue_disable_notify(netcp->rx_queue);
+ napi_schedule(&netcp->rx_napi);
+}
+
+static void netcp_free_tx_desc_chain(struct netcp_intf *netcp,
+ struct knav_dma_desc *desc,
+ unsigned int desc_sz)
+{
+ struct knav_dma_desc *ndesc = desc;
+ dma_addr_t dma_desc, dma_buf;
+ unsigned int buf_len;
+
+ while (ndesc) {
+ get_pkt_info(&dma_buf, &buf_len, &dma_desc, ndesc);
+
+ if (dma_buf && buf_len)
+ dma_unmap_single(netcp->dev, dma_buf, buf_len,
+ DMA_TO_DEVICE);
+ else
+ dev_warn(netcp->ndev_dev, "bad Tx desc buf(%pad), len(%d)\n",
+ &dma_buf, buf_len);
+
+ knav_pool_desc_put(netcp->tx_pool, ndesc);
+ ndesc = NULL;
+ if (dma_desc) {
+ ndesc = knav_pool_desc_unmap(netcp->tx_pool, dma_desc,
+ desc_sz);
+ if (!ndesc)
+ dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+ }
+ }
+}
+
+static int netcp_process_tx_compl_packets(struct netcp_intf *netcp,
+ unsigned int budget)
+{
+ struct netcp_stats *tx_stats = &netcp->stats;
+ struct knav_dma_desc *desc;
+ struct netcp_tx_cb *tx_cb;
+ struct sk_buff *skb;
+ unsigned int dma_sz;
+ dma_addr_t dma;
+ int pkts = 0;
+
+ while (budget--) {
+ dma = knav_queue_pop(netcp->tx_compl_q, &dma_sz);
+ if (!dma)
+ break;
+ desc = knav_pool_desc_unmap(netcp->tx_pool, dma, dma_sz);
+ if (unlikely(!desc)) {
+ dev_err(netcp->ndev_dev, "failed to unmap Tx desc\n");
+ tx_stats->tx_errors++;
+ continue;
+ }
+
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ skb = (struct sk_buff *)GET_SW_DATA0(desc);
+ netcp_free_tx_desc_chain(netcp, desc, dma_sz);
+ if (!skb) {
+ dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
+ tx_stats->tx_errors++;
+ continue;
+ }
+
+ tx_cb = (struct netcp_tx_cb *)skb->cb;
+ if (tx_cb->txtstamp)
+ tx_cb->txtstamp(tx_cb->ts_context, skb);
+
+ if (netif_subqueue_stopped(netcp->ndev, skb) &&
+ netif_running(netcp->ndev) &&
+ (knav_pool_count(netcp->tx_pool) >
+ netcp->tx_resume_threshold)) {
+ u16 subqueue = skb_get_queue_mapping(skb);
+
+ netif_wake_subqueue(netcp->ndev, subqueue);
+ }
+
+ u64_stats_update_begin(&tx_stats->syncp_tx);
+ tx_stats->tx_packets++;
+ tx_stats->tx_bytes += skb->len;
+ u64_stats_update_end(&tx_stats->syncp_tx);
+ dev_kfree_skb(skb);
+ pkts++;
+ }
+ return pkts;
+}
+
+static int netcp_tx_poll(struct napi_struct *napi, int budget)
+{
+ int packets;
+ struct netcp_intf *netcp = container_of(napi, struct netcp_intf,
+ tx_napi);
+
+ packets = netcp_process_tx_compl_packets(netcp, budget);
+ if (packets < budget) {
+ napi_complete(&netcp->tx_napi);
+ knav_queue_enable_notify(netcp->tx_compl_q);
+ }
+
+ return packets;
+}
+
+static void netcp_tx_notify(void *arg)
+{
+ struct netcp_intf *netcp = arg;
+
+ knav_queue_disable_notify(netcp->tx_compl_q);
+ napi_schedule(&netcp->tx_napi);
+}
+
+static struct knav_dma_desc*
+netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
+{
+ struct knav_dma_desc *desc, *ndesc, *pdesc;
+ unsigned int pkt_len = skb_headlen(skb);
+ struct device *dev = netcp->dev;
+ dma_addr_t dma_addr;
+ unsigned int dma_sz;
+ int i;
+
+ /* Map the linear buffer */
+ dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev, dma_addr))) {
+ dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
+ return NULL;
+ }
+
+ desc = knav_pool_desc_get(netcp->tx_pool);
+ if (IS_ERR_OR_NULL(desc)) {
+ dev_err(netcp->ndev_dev, "out of TX desc\n");
+ dma_unmap_single(dev, dma_addr, pkt_len, DMA_TO_DEVICE);
+ return NULL;
+ }
+
+ set_pkt_info(dma_addr, pkt_len, 0, desc);
+ if (skb_is_nonlinear(skb)) {
+ prefetchw(skb_shinfo(skb));
+ } else {
+ desc->next_desc = 0;
+ goto upd_pkt_len;
+ }
+
+ pdesc = desc;
+
+ /* Handle the case where skb is fragmented in pages */
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+ struct page *page = skb_frag_page(frag);
+ u32 page_offset = skb_frag_off(frag);
+ u32 buf_len = skb_frag_size(frag);
+ dma_addr_t desc_dma;
+ u32 desc_dma_32;
+
+ dma_addr = dma_map_page(dev, page, page_offset, buf_len,
+ DMA_TO_DEVICE);
+ if (unlikely(!dma_addr)) {
+ dev_err(netcp->ndev_dev, "Failed to map skb page\n");
+ goto free_descs;
+ }
+
+ ndesc = knav_pool_desc_get(netcp->tx_pool);
+ if (IS_ERR_OR_NULL(ndesc)) {
+ dev_err(netcp->ndev_dev, "out of TX desc for frags\n");
+ dma_unmap_page(dev, dma_addr, buf_len, DMA_TO_DEVICE);
+ goto free_descs;
+ }
+
+ desc_dma = knav_pool_desc_virt_to_dma(netcp->tx_pool, ndesc);
+ set_pkt_info(dma_addr, buf_len, 0, ndesc);
+ desc_dma_32 = (u32)desc_dma;
+ set_words(&desc_dma_32, 1, &pdesc->next_desc);
+ pkt_len += buf_len;
+ if (pdesc != desc)
+ knav_pool_desc_map(netcp->tx_pool, pdesc,
+ sizeof(*pdesc), &desc_dma, &dma_sz);
+ pdesc = ndesc;
+ }
+ if (pdesc != desc)
+ knav_pool_desc_map(netcp->tx_pool, pdesc, sizeof(*pdesc),
+ &dma_addr, &dma_sz);
+
+ /* frag list based linkage is not supported for now. */
+ if (skb_shinfo(skb)->frag_list) {
+ dev_err_ratelimited(netcp->ndev_dev, "NETIF_F_FRAGLIST not supported\n");
+ goto free_descs;
+ }
+
+upd_pkt_len:
+ WARN_ON(pkt_len != skb->len);
+
+ pkt_len &= KNAV_DMA_DESC_PKT_LEN_MASK;
+ set_words(&pkt_len, 1, &desc->desc_info);
+ return desc;
+
+free_descs:
+ netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+ return NULL;
+}
+
+static int netcp_tx_submit_skb(struct netcp_intf *netcp,
+ struct sk_buff *skb,
+ struct knav_dma_desc *desc)
+{
+ struct netcp_tx_pipe *tx_pipe = NULL;
+ struct netcp_hook_list *tx_hook;
+ struct netcp_packet p_info;
+ struct netcp_tx_cb *tx_cb;
+ unsigned int dma_sz;
+ dma_addr_t dma;
+ u32 tmp = 0;
+ int ret = 0;
+
+ p_info.netcp = netcp;
+ p_info.skb = skb;
+ p_info.tx_pipe = NULL;
+ p_info.psdata_len = 0;
+ p_info.ts_context = NULL;
+ p_info.txtstamp = NULL;
+ p_info.epib = desc->epib;
+ p_info.psdata = (u32 __force *)desc->psdata;
+ memset(p_info.epib, 0, KNAV_DMA_NUM_EPIB_WORDS * sizeof(__le32));
+
+ /* Find out where to inject the packet for transmission */
+ list_for_each_entry(tx_hook, &netcp->txhook_list_head, list) {
+ ret = tx_hook->hook_rtn(tx_hook->order, tx_hook->hook_data,
+ &p_info);
+ if (unlikely(ret != 0)) {
+ dev_err(netcp->ndev_dev, "TX hook %d rejected the packet with reason(%d)\n",
+ tx_hook->order, ret);
+ ret = (ret < 0) ? ret : NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* Make sure some TX hook claimed the packet */
+ tx_pipe = p_info.tx_pipe;
+ if (!tx_pipe) {
+ dev_err(netcp->ndev_dev, "No TX hook claimed the packet!\n");
+ ret = -ENXIO;
+ goto out;
+ }
+
+ tx_cb = (struct netcp_tx_cb *)skb->cb;
+ tx_cb->ts_context = p_info.ts_context;
+ tx_cb->txtstamp = p_info.txtstamp;
+
+ /* update descriptor */
+ if (p_info.psdata_len) {
+ /* psdata points to both native-endian and device-endian data */
+ __le32 *psdata = (void __force *)p_info.psdata;
+
+ set_words((u32 *)psdata +
+ (KNAV_DMA_NUM_PS_WORDS - p_info.psdata_len),
+ p_info.psdata_len, psdata);
+ tmp |= (p_info.psdata_len & KNAV_DMA_DESC_PSLEN_MASK) <<
+ KNAV_DMA_DESC_PSLEN_SHIFT;
+ }
+
+ tmp |= KNAV_DMA_DESC_HAS_EPIB |
+ ((netcp->tx_compl_qid & KNAV_DMA_DESC_RETQ_MASK) <<
+ KNAV_DMA_DESC_RETQ_SHIFT);
+
+ if (!(tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO)) {
+ tmp |= ((tx_pipe->switch_to_port & KNAV_DMA_DESC_PSFLAG_MASK) <<
+ KNAV_DMA_DESC_PSFLAG_SHIFT);
+ }
+
+ set_words(&tmp, 1, &desc->packet_info);
+ /* warning!!!! We are saving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ SET_SW_DATA0((u32)skb, desc);
+
+ if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
+ tmp = tx_pipe->switch_to_port;
+ set_words(&tmp, 1, &desc->tag_info);
+ }
+
+ /* submit packet descriptor */
+ ret = knav_pool_desc_map(netcp->tx_pool, desc, sizeof(*desc), &dma,
+ &dma_sz);
+ if (unlikely(ret)) {
+ dev_err(netcp->ndev_dev, "%s() failed to map desc\n", __func__);
+ ret = -ENOMEM;
+ goto out;
+ }
+ skb_tx_timestamp(skb);
+ knav_queue_push(tx_pipe->dma_queue, dma, dma_sz, 0);
+
+out:
+ return ret;
+}
+
+/* Submit the packet */
+static netdev_tx_t netcp_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_stats *tx_stats = &netcp->stats;
+ int subqueue = skb_get_queue_mapping(skb);
+ struct knav_dma_desc *desc;
+ int desc_count, ret = 0;
+
+ if (unlikely(skb->len <= 0)) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (unlikely(skb->len < NETCP_MIN_PACKET_SIZE)) {
+ ret = skb_padto(skb, NETCP_MIN_PACKET_SIZE);
+ if (ret < 0) {
+ /* If we get here, the skb has already been dropped */
+ dev_warn(netcp->ndev_dev, "padding failed (%d), packet dropped\n",
+ ret);
+ tx_stats->tx_dropped++;
+ return ret;
+ }
+ skb->len = NETCP_MIN_PACKET_SIZE;
+ }
+
+ desc = netcp_tx_map_skb(skb, netcp);
+ if (unlikely(!desc)) {
+ netif_stop_subqueue(ndev, subqueue);
+ ret = -ENOBUFS;
+ goto drop;
+ }
+
+ ret = netcp_tx_submit_skb(netcp, skb, desc);
+ if (ret)
+ goto drop;
+
+ /* Check Tx pool count & stop subqueue if needed */
+ desc_count = knav_pool_count(netcp->tx_pool);
+ if (desc_count < netcp->tx_pause_threshold) {
+ dev_dbg(netcp->ndev_dev, "pausing tx, count(%d)\n", desc_count);
+ netif_stop_subqueue(ndev, subqueue);
+ }
+ return NETDEV_TX_OK;
+
+drop:
+ tx_stats->tx_dropped++;
+ if (desc)
+ netcp_free_tx_desc_chain(netcp, desc, sizeof(*desc));
+ dev_kfree_skb(skb);
+ return ret;
+}
+
+int netcp_txpipe_close(struct netcp_tx_pipe *tx_pipe)
+{
+ if (tx_pipe->dma_channel) {
+ knav_dma_close_channel(tx_pipe->dma_channel);
+ tx_pipe->dma_channel = NULL;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_close);
+
+int netcp_txpipe_open(struct netcp_tx_pipe *tx_pipe)
+{
+ struct device *dev = tx_pipe->netcp_device->device;
+ struct knav_dma_cfg config;
+ int ret = 0;
+ u8 name[16];
+
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_MEM_TO_DEV;
+ config.u.tx.filt_einfo = false;
+ config.u.tx.filt_pswords = false;
+ config.u.tx.priority = DMA_PRIO_MED_L;
+
+ tx_pipe->dma_channel = knav_dma_open_channel(dev,
+ tx_pipe->dma_chan_name, &config);
+ if (IS_ERR(tx_pipe->dma_channel)) {
+ dev_err(dev, "failed opening tx chan(%s)\n",
+ tx_pipe->dma_chan_name);
+ ret = PTR_ERR(tx_pipe->dma_channel);
+ goto err;
+ }
+
+ snprintf(name, sizeof(name), "tx-pipe-%s", dev_name(dev));
+ tx_pipe->dma_queue = knav_queue_open(name, tx_pipe->dma_queue_id,
+ KNAV_QUEUE_SHARED);
+ if (IS_ERR(tx_pipe->dma_queue)) {
+ dev_err(dev, "Could not open DMA queue for channel \"%s\": %pe\n",
+ name, tx_pipe->dma_queue);
+ ret = PTR_ERR(tx_pipe->dma_queue);
+ goto err;
+ }
+
+ dev_dbg(dev, "opened tx pipe %s\n", name);
+ return 0;
+
+err:
+ if (!IS_ERR_OR_NULL(tx_pipe->dma_channel))
+ knav_dma_close_channel(tx_pipe->dma_channel);
+ tx_pipe->dma_channel = NULL;
+ return ret;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_open);
+
+int netcp_txpipe_init(struct netcp_tx_pipe *tx_pipe,
+ struct netcp_device *netcp_device,
+ const char *dma_chan_name, unsigned int dma_queue_id)
+{
+ memset(tx_pipe, 0, sizeof(*tx_pipe));
+ tx_pipe->netcp_device = netcp_device;
+ tx_pipe->dma_chan_name = dma_chan_name;
+ tx_pipe->dma_queue_id = dma_queue_id;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(netcp_txpipe_init);
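+
+/* Illustrative sketch (editor's addition, not part of the upstream file):
+ * the expected tx-pipe lifecycle for a hypothetical module. The channel
+ * name "nettx" and queue id 648 are placeholders; real values come from
+ * the module's device-tree configuration.
+ */
+#if 0 /* example only */
+static struct netcp_tx_pipe my_tx_pipe;
+
+static int my_open_tx_pipe(struct netcp_device *netcp_device)
+{
+ int ret;
+
+ ret = netcp_txpipe_init(&my_tx_pipe, netcp_device, "nettx", 648);
+ if (ret)
+ return ret;
+
+ /* opens the DMA channel and the hardware transmit queue */
+ ret = netcp_txpipe_open(&my_tx_pipe);
+ if (ret)
+ return ret;
+
+ /* the module's TX hook then sets p_info->tx_pipe = &my_tx_pipe for
+ * every packet it claims; netcp_txpipe_close() undoes the open
+ */
+ return 0;
+}
+#endif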
+
+static struct netcp_addr *netcp_addr_find(struct netcp_intf *netcp,
+ const u8 *addr,
+ enum netcp_addr_type type)
+{
+ struct netcp_addr *naddr;
+
+ list_for_each_entry(naddr, &netcp->addr_list, node) {
+ if (naddr->type != type)
+ continue;
+ if (addr && memcmp(addr, naddr->addr, ETH_ALEN))
+ continue;
+ return naddr;
+ }
+
+ return NULL;
+}
+
+static struct netcp_addr *netcp_addr_add(struct netcp_intf *netcp,
+ const u8 *addr,
+ enum netcp_addr_type type)
+{
+ struct netcp_addr *naddr;
+
+ naddr = devm_kmalloc(netcp->dev, sizeof(*naddr), GFP_ATOMIC);
+ if (!naddr)
+ return NULL;
+
+ naddr->type = type;
+ naddr->flags = 0;
+ naddr->netcp = netcp;
+ if (addr)
+ ether_addr_copy(naddr->addr, addr);
+ else
+ eth_zero_addr(naddr->addr);
+ list_add_tail(&naddr->node, &netcp->addr_list);
+
+ return naddr;
+}
+
+static void netcp_addr_del(struct netcp_intf *netcp, struct netcp_addr *naddr)
+{
+ list_del(&naddr->node);
+ devm_kfree(netcp->dev, naddr);
+}
+
+static void netcp_addr_clear_mark(struct netcp_intf *netcp)
+{
+ struct netcp_addr *naddr;
+
+ list_for_each_entry(naddr, &netcp->addr_list, node)
+ naddr->flags = 0;
+}
+
+static void netcp_addr_add_mark(struct netcp_intf *netcp, const u8 *addr,
+ enum netcp_addr_type type)
+{
+ struct netcp_addr *naddr;
+
+ naddr = netcp_addr_find(netcp, addr, type);
+ if (naddr) {
+ naddr->flags |= ADDR_VALID;
+ return;
+ }
+
+ naddr = netcp_addr_add(netcp, addr, type);
+ if (!WARN_ON(!naddr))
+ naddr->flags |= ADDR_NEW;
+}
+
+static void netcp_addr_sweep_del(struct netcp_intf *netcp)
+{
+ struct netcp_addr *naddr, *tmp;
+ struct netcp_intf_modpriv *priv;
+ struct netcp_module *module;
+ int error;
+
+ list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+ if (naddr->flags & (ADDR_VALID | ADDR_NEW))
+ continue;
+ dev_dbg(netcp->ndev_dev, "deleting address %pM, type %x\n",
+ naddr->addr, naddr->type);
+ for_each_module(netcp, priv) {
+ module = priv->netcp_module;
+ if (!module->del_addr)
+ continue;
+ error = module->del_addr(priv->module_priv,
+ naddr);
+ WARN_ON(error);
+ }
+ netcp_addr_del(netcp, naddr);
+ }
+}
+
+static void netcp_addr_sweep_add(struct netcp_intf *netcp)
+{
+ struct netcp_addr *naddr, *tmp;
+ struct netcp_intf_modpriv *priv;
+ struct netcp_module *module;
+ int error;
+
+ list_for_each_entry_safe(naddr, tmp, &netcp->addr_list, node) {
+ if (!(naddr->flags & ADDR_NEW))
+ continue;
+ dev_dbg(netcp->ndev_dev, "adding address %pM, type %x\n",
+ naddr->addr, naddr->type);
+
+ for_each_module(netcp, priv) {
+ module = priv->netcp_module;
+ if (!module->add_addr)
+ continue;
+ error = module->add_addr(priv->module_priv, naddr);
+ WARN_ON(error);
+ }
+ }
+}
+
+static int netcp_set_promiscuous(struct netcp_intf *netcp, bool promisc)
+{
+ struct netcp_intf_modpriv *priv;
+ struct netcp_module *module;
+ int error;
+
+ for_each_module(netcp, priv) {
+ module = priv->netcp_module;
+ if (!module->set_rx_mode)
+ continue;
+
+ error = module->set_rx_mode(priv->module_priv, promisc);
+ if (error)
+ return error;
+ }
+ return 0;
+}
+
+static void netcp_set_rx_mode(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netdev_hw_addr *ndev_addr;
+ bool promisc;
+
+ promisc = (ndev->flags & IFF_PROMISC ||
+ ndev->flags & IFF_ALLMULTI ||
+ netdev_mc_count(ndev) > NETCP_MAX_MCAST_ADDR);
+
+ spin_lock(&netcp->lock);
+ /* first clear all marks */
+ netcp_addr_clear_mark(netcp);
+
+ /* next add new entries, mark existing ones */
+ netcp_addr_add_mark(netcp, ndev->broadcast, ADDR_BCAST);
+ for_each_dev_addr(ndev, ndev_addr)
+ netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_DEV);
+ netdev_for_each_uc_addr(ndev_addr, ndev)
+ netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_UCAST);
+ netdev_for_each_mc_addr(ndev_addr, ndev)
+ netcp_addr_add_mark(netcp, ndev_addr->addr, ADDR_MCAST);
+
+ if (promisc)
+ netcp_addr_add_mark(netcp, NULL, ADDR_ANY);
+
+ /* finally sweep and callout into modules */
+ netcp_addr_sweep_del(netcp);
+ netcp_addr_sweep_add(netcp);
+ netcp_set_promiscuous(netcp, promisc);
+ spin_unlock(&netcp->lock);
+}
+
+static void netcp_free_navigator_resources(struct netcp_intf *netcp)
+{
+ int i;
+
+ if (netcp->rx_channel) {
+ knav_dma_close_channel(netcp->rx_channel);
+ netcp->rx_channel = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(netcp->rx_pool))
+ netcp_rxpool_free(netcp);
+
+ if (!IS_ERR_OR_NULL(netcp->rx_queue)) {
+ knav_queue_close(netcp->rx_queue);
+ netcp->rx_queue = NULL;
+ }
+
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN &&
+ !IS_ERR_OR_NULL(netcp->rx_fdq[i]) ; ++i) {
+ knav_queue_close(netcp->rx_fdq[i]);
+ netcp->rx_fdq[i] = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(netcp->tx_compl_q)) {
+ knav_queue_close(netcp->tx_compl_q);
+ netcp->tx_compl_q = NULL;
+ }
+
+ if (!IS_ERR_OR_NULL(netcp->tx_pool)) {
+ knav_pool_destroy(netcp->tx_pool);
+ netcp->tx_pool = NULL;
+ }
+}
+
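+/* Allocate all Navigator (Knav) resources needed by one interface: the
+ * Rx/Tx descriptor pools, the Tx and Rx completion queues (with their
+ * notifiers set up but initially disabled), the Rx free-descriptor
+ * queues and, last, the Rx DMA channel. Any failure unwinds through
+ * netcp_free_navigator_resources() above.
+ */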
+static int netcp_setup_navigator_resources(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct knav_queue_notify_config notify_cfg;
+ struct knav_dma_cfg config;
+ u32 last_fdq = 0;
+ u8 name[16];
+ int ret;
+ int i;
+
+ /* Create Rx/Tx descriptor pools */
+ snprintf(name, sizeof(name), "rx-pool-%s", ndev->name);
+ netcp->rx_pool = knav_pool_create(name, netcp->rx_pool_size,
+ netcp->rx_pool_region_id);
+ if (IS_ERR_OR_NULL(netcp->rx_pool)) {
+ dev_err(netcp->ndev_dev, "Couldn't create rx pool\n");
+ ret = PTR_ERR(netcp->rx_pool);
+ goto fail;
+ }
+
+ snprintf(name, sizeof(name), "tx-pool-%s", ndev->name);
+ netcp->tx_pool = knav_pool_create(name, netcp->tx_pool_size,
+ netcp->tx_pool_region_id);
+ if (IS_ERR_OR_NULL(netcp->tx_pool)) {
+ dev_err(netcp->ndev_dev, "Couldn't create tx pool\n");
+ ret = PTR_ERR(netcp->tx_pool);
+ goto fail;
+ }
+
+ /* open Tx completion queue */
+ snprintf(name, sizeof(name), "tx-compl-%s", ndev->name);
+ netcp->tx_compl_q = knav_queue_open(name, netcp->tx_compl_qid, 0);
+ if (IS_ERR(netcp->tx_compl_q)) {
+ ret = PTR_ERR(netcp->tx_compl_q);
+ goto fail;
+ }
+ netcp->tx_compl_qid = knav_queue_get_id(netcp->tx_compl_q);
+
+ /* Set notification for Tx completion */
+ notify_cfg.fn = netcp_tx_notify;
+ notify_cfg.fn_arg = netcp;
+ ret = knav_queue_device_control(netcp->tx_compl_q,
+ KNAV_QUEUE_SET_NOTIFIER,
+ (unsigned long)&notify_cfg);
+ if (ret)
+ goto fail;
+
+ knav_queue_disable_notify(netcp->tx_compl_q);
+
+ /* open Rx completion queue */
+ snprintf(name, sizeof(name), "rx-compl-%s", ndev->name);
+ netcp->rx_queue = knav_queue_open(name, netcp->rx_queue_id, 0);
+ if (IS_ERR(netcp->rx_queue)) {
+ ret = PTR_ERR(netcp->rx_queue);
+ goto fail;
+ }
+ netcp->rx_queue_id = knav_queue_get_id(netcp->rx_queue);
+
+ /* Set notification for Rx completion */
+ notify_cfg.fn = netcp_rx_notify;
+ notify_cfg.fn_arg = netcp;
+ ret = knav_queue_device_control(netcp->rx_queue,
+ KNAV_QUEUE_SET_NOTIFIER,
+ (unsigned long)&notify_cfg);
+ if (ret)
+ goto fail;
+
+ knav_queue_disable_notify(netcp->rx_queue);
+
+ /* open Rx FDQs */
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
+ ++i) {
+ snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
+ netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
+ if (IS_ERR(netcp->rx_fdq[i])) {
+ ret = PTR_ERR(netcp->rx_fdq[i]);
+ goto fail;
+ }
+ }
+
+ memset(&config, 0, sizeof(config));
+ config.direction = DMA_DEV_TO_MEM;
+ config.u.rx.einfo_present = true;
+ config.u.rx.psinfo_present = true;
+ config.u.rx.err_mode = DMA_DROP;
+ config.u.rx.desc_type = DMA_DESC_HOST;
+ config.u.rx.psinfo_at_sop = false;
+ config.u.rx.sop_offset = NETCP_SOP_OFFSET;
+ config.u.rx.dst_q = netcp->rx_queue_id;
+ config.u.rx.thresh = DMA_THRESH_NONE;
+
+ for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN; ++i) {
+ if (netcp->rx_fdq[i])
+ last_fdq = knav_queue_get_id(netcp->rx_fdq[i]);
+ config.u.rx.fdq[i] = last_fdq;
+ }
+
+ netcp->rx_channel = knav_dma_open_channel(netcp->netcp_device->device,
+ netcp->dma_chan_name, &config);
+ if (IS_ERR(netcp->rx_channel)) {
+ dev_err(netcp->ndev_dev, "failed opening rx chan(%s\n",
+ netcp->dma_chan_name);
+ ret = PTR_ERR(netcp->rx_channel);
+ goto fail;
+ }
+
+ dev_dbg(netcp->ndev_dev, "opened RX channel: %p\n", netcp->rx_channel);
+ return 0;
+
+fail:
+ netcp_free_navigator_resources(netcp);
+ return ret;
+}
+
+/* Open the device */
+static int netcp_ndo_open(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int ret;
+
+ netif_carrier_off(ndev);
+ ret = netcp_setup_navigator_resources(ndev);
+ if (ret) {
+ dev_err(netcp->ndev_dev, "Failed to setup navigator resources\n");
+ goto fail;
+ }
+
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->open) {
+ ret = module->open(intf_modpriv->module_priv, ndev);
+ if (ret != 0) {
+ dev_err(netcp->ndev_dev, "module open failed\n");
+ goto fail_open;
+ }
+ }
+ }
+
+ napi_enable(&netcp->rx_napi);
+ napi_enable(&netcp->tx_napi);
+ knav_queue_enable_notify(netcp->tx_compl_q);
+ knav_queue_enable_notify(netcp->rx_queue);
+ netcp_rxpool_refill(netcp);
+ netif_tx_wake_all_queues(ndev);
+ dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
+ return 0;
+
+fail_open:
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->close)
+ module->close(intf_modpriv->module_priv, ndev);
+ }
+
+fail:
+ netcp_free_navigator_resources(netcp);
+ return ret;
+}
+
+/* Close the device */
+static int netcp_ndo_stop(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int err = 0;
+
+ netif_tx_stop_all_queues(ndev);
+ netif_carrier_off(ndev);
+ netcp_addr_clear_mark(netcp);
+ netcp_addr_sweep_del(netcp);
+ knav_queue_disable_notify(netcp->rx_queue);
+ knav_queue_disable_notify(netcp->tx_compl_q);
+ napi_disable(&netcp->rx_napi);
+ napi_disable(&netcp->tx_napi);
+
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->close) {
+ err = module->close(intf_modpriv->module_priv, ndev);
+ if (err != 0)
+ dev_err(netcp->ndev_dev, "Close failed\n");
+ }
+ }
+
+ /* Recycle Rx descriptors from completion queue */
+ netcp_empty_rx_queue(netcp);
+
+ /* Recycle Tx descriptors from completion queue */
+ netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+
+ if (knav_pool_count(netcp->tx_pool) != netcp->tx_pool_size)
+ dev_err(netcp->ndev_dev, "Lost (%d) Tx descs\n",
+ netcp->tx_pool_size - knav_pool_count(netcp->tx_pool));
+
+ netcp_free_navigator_resources(netcp);
+ dev_dbg(netcp->ndev_dev, "netcp device %s stopped\n", ndev->name);
+ return 0;
+}
+
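+/* Fan an ioctl request out to all attached modules. A module returning
+ * a real error (anything other than -EOPNOTSUPP) aborts the loop and is
+ * reported to the caller; otherwise the ioctl succeeds if at least one
+ * module handled it and fails with -EOPNOTSUPP if none did.
+ */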
+static int netcp_ndo_ioctl(struct net_device *ndev,
+ struct ifreq *req, int cmd)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ int ret = -1, err = -EOPNOTSUPP;
+
+ if (!netif_running(ndev))
+ return -EINVAL;
+
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (!module->ioctl)
+ continue;
+
+ err = module->ioctl(intf_modpriv->module_priv, req, cmd);
+ if ((err < 0) && (err != -EOPNOTSUPP)) {
+ ret = err;
+ goto out;
+ }
+ if (err == 0)
+ ret = err;
+ }
+
+out:
+ return (ret == 0) ? 0 : err;
+}
+
+static void netcp_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ unsigned int descs = knav_pool_count(netcp->tx_pool);
+
+ dev_err(netcp->ndev_dev, "transmit timed out tx descs(%d)\n", descs);
+ netcp_process_tx_compl_packets(netcp, netcp->tx_pool_size);
+ netif_trans_update(ndev);
+ netif_tx_wake_all_queues(ndev);
+}
+
+static int netcp_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ unsigned long flags;
+ int err = 0;
+
+ dev_dbg(netcp->ndev_dev, "adding rx vlan id: %d\n", vid);
+
+ spin_lock_irqsave(&netcp->lock, flags);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if ((module->add_vid) && (vid != 0)) {
+ err = module->add_vid(intf_modpriv->module_priv, vid);
+ if (err != 0) {
+ dev_err(netcp->ndev_dev, "Could not add vlan id = %d\n",
+ vid);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&netcp->lock, flags);
+
+ return err;
+}
+
+static int netcp_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_intf_modpriv *intf_modpriv;
+ struct netcp_module *module;
+ unsigned long flags;
+ int err = 0;
+
+ dev_dbg(netcp->ndev_dev, "removing rx vlan id: %d\n", vid);
+
+ spin_lock_irqsave(&netcp->lock, flags);
+ for_each_module(netcp, intf_modpriv) {
+ module = intf_modpriv->netcp_module;
+ if (module->del_vid) {
+ err = module->del_vid(intf_modpriv->module_priv, vid);
+ if (err != 0) {
+ dev_err(netcp->ndev_dev, "Could not delete vlan id = %d\n",
+ vid);
+ break;
+ }
+ }
+ }
+ spin_unlock_irqrestore(&netcp->lock, flags);
+ return err;
+}
+
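+/* mqprio offload: map each requested traffic class to exactly one Tx
+ * queue (class i -> queue i); a request for zero traffic classes clears
+ * the mapping again.
+ */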
+static int netcp_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
+{
+ struct tc_mqprio_qopt *mqprio = type_data;
+ u8 num_tc;
+ int i;
+
+ /* setup tc must be called under rtnl lock */
+ ASSERT_RTNL();
+
+ if (type != TC_SETUP_QDISC_MQPRIO)
+ return -EOPNOTSUPP;
+
+ mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+ num_tc = mqprio->num_tc;
+
+ /* Sanity-check the number of traffic classes requested */
+ if ((dev->real_num_tx_queues <= 1) ||
+ (dev->real_num_tx_queues < num_tc))
+ return -EINVAL;
+
+ /* Configure traffic class to queue mappings */
+ if (num_tc) {
+ netdev_set_num_tc(dev, num_tc);
+ for (i = 0; i < num_tc; i++)
+ netdev_set_tc_queue(dev, i, 1, i);
+ } else {
+ netdev_reset_tc(dev);
+ }
+
+ return 0;
+}
+
+static void
+netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_stats *p = &netcp->stats;
+ u64 rxpackets, rxbytes, txpackets, txbytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp_rx);
+ rxpackets = p->rx_packets;
+ rxbytes = p->rx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp_rx, start));
+
+ do {
+ start = u64_stats_fetch_begin_irq(&p->syncp_tx);
+ txpackets = p->tx_packets;
+ txbytes = p->tx_bytes;
+ } while (u64_stats_fetch_retry_irq(&p->syncp_tx, start));
+
+ stats->rx_packets = rxpackets;
+ stats->rx_bytes = rxbytes;
+ stats->tx_packets = txpackets;
+ stats->tx_bytes = txbytes;
+
+ /* The following are stored as 32 bit */
+ stats->rx_errors = p->rx_errors;
+ stats->rx_dropped = p->rx_dropped;
+ stats->tx_dropped = p->tx_dropped;
+}
+
+static const struct net_device_ops netcp_netdev_ops = {
+ .ndo_open = netcp_ndo_open,
+ .ndo_stop = netcp_ndo_stop,
+ .ndo_start_xmit = netcp_ndo_start_xmit,
+ .ndo_set_rx_mode = netcp_set_rx_mode,
+ .ndo_eth_ioctl = netcp_ndo_ioctl,
+ .ndo_get_stats64 = netcp_get_stats,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_vlan_rx_add_vid = netcp_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = netcp_rx_kill_vid,
+ .ndo_tx_timeout = netcp_ndo_tx_timeout,
+ .ndo_select_queue = dev_pick_tx_zero,
+ .ndo_setup_tc = netcp_setup_tc,
+};
+
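+/* Create one NETCP interface from a "netcp-interfaces" child node: set
+ * up the net_device, read the MAC address (from the e-fuse block or from
+ * the DT node), parse the Rx/Tx pool and queue parameters, add the NAPI
+ * contexts and queue the interface on the device's interface list. The
+ * net_device is not registered here; that happens after the NETCP
+ * modules attach to the interface.
+ */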
+static int netcp_create_interface(struct netcp_device *netcp_device,
+ struct device_node *node_interface)
+{
+ struct device *dev = netcp_device->device;
+ struct device_node *node = dev->of_node;
+ struct netcp_intf *netcp;
+ struct net_device *ndev;
+ resource_size_t size;
+ struct resource res;
+ void __iomem *efuse = NULL;
+ u32 efuse_mac = 0;
+ u8 efuse_mac_addr[6];
+ u32 temp[2];
+ int ret = 0;
+
+ ndev = alloc_etherdev_mqs(sizeof(*netcp), 1, 1);
+ if (!ndev) {
+ dev_err(dev, "Error allocating netdev\n");
+ return -ENOMEM;
+ }
+
+ ndev->features |= NETIF_F_SG;
+ ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ ndev->hw_features = ndev->features;
+ ndev->vlan_features |= NETIF_F_SG;
+
+ /* MTU range: 68 - 9486 */
+ ndev->min_mtu = ETH_MIN_MTU;
+ ndev->max_mtu = NETCP_MAX_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN);
+
+ netcp = netdev_priv(ndev);
+ spin_lock_init(&netcp->lock);
+ INIT_LIST_HEAD(&netcp->module_head);
+ INIT_LIST_HEAD(&netcp->txhook_list_head);
+ INIT_LIST_HEAD(&netcp->rxhook_list_head);
+ INIT_LIST_HEAD(&netcp->addr_list);
+ u64_stats_init(&netcp->stats.syncp_rx);
+ u64_stats_init(&netcp->stats.syncp_tx);
+ netcp->netcp_device = netcp_device;
+ netcp->dev = netcp_device->device;
+ netcp->ndev = ndev;
+ netcp->ndev_dev = &ndev->dev;
+ netcp->msg_enable = netif_msg_init(netcp_debug_level, NETCP_DEBUG);
+ netcp->tx_pause_threshold = MAX_SKB_FRAGS;
+ netcp->tx_resume_threshold = netcp->tx_pause_threshold;
+ netcp->node_interface = node_interface;
+
+ ret = of_property_read_u32(node_interface, "efuse-mac", &efuse_mac);
+ if (efuse_mac) {
+ if (of_address_to_resource(node, NETCP_EFUSE_REG_INDEX, &res)) {
+ dev_err(dev, "could not find efuse-mac reg resource\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+ size = resource_size(&res);
+
+ if (!devm_request_mem_region(dev, res.start, size,
+ dev_name(dev))) {
+ dev_err(dev, "could not reserve resource\n");
+ ret = -ENOMEM;
+ goto quit;
+ }
+
+ efuse = devm_ioremap(dev, res.start, size);
+ if (!efuse) {
+ dev_err(dev, "could not map resource\n");
+ devm_release_mem_region(dev, res.start, size);
+ ret = -ENOMEM;
+ goto quit;
+ }
+
+ emac_arch_get_mac_addr(efuse_mac_addr, efuse, efuse_mac);
+ if (is_valid_ether_addr(efuse_mac_addr))
+ eth_hw_addr_set(ndev, efuse_mac_addr);
+ else
+ eth_hw_addr_random(ndev);
+
+ devm_iounmap(dev, efuse);
+ devm_release_mem_region(dev, res.start, size);
+ } else {
+ ret = of_get_ethdev_address(node_interface, ndev);
+ if (ret)
+ eth_hw_addr_random(ndev);
+ }
+
+ ret = of_property_read_string(node_interface, "rx-channel",
+ &netcp->dma_chan_name);
+ if (ret < 0) {
+ dev_err(dev, "missing \"rx-channel\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ ret = of_property_read_u32(node_interface, "rx-queue",
+ &netcp->rx_queue_id);
+ if (ret < 0) {
+ dev_warn(dev, "missing \"rx-queue\" parameter\n");
+ netcp->rx_queue_id = KNAV_QUEUE_QPEND;
+ }
+
+ ret = of_property_read_u32_array(node_interface, "rx-queue-depth",
+ netcp->rx_queue_depths,
+ KNAV_DMA_FDQ_PER_CHAN);
+ if (ret < 0) {
+ dev_err(dev, "missing \"rx-queue-depth\" parameter\n");
+ netcp->rx_queue_depths[0] = 128;
+ }
+
+ ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
+ if (ret < 0) {
+ dev_err(dev, "missing \"rx-pool\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+ netcp->rx_pool_size = temp[0];
+ netcp->rx_pool_region_id = temp[1];
+
+ ret = of_property_read_u32_array(node_interface, "tx-pool", temp, 2);
+ if (ret < 0) {
+ dev_err(dev, "missing \"tx-pool\" parameter\n");
+ ret = -ENODEV;
+ goto quit;
+ }
+ netcp->tx_pool_size = temp[0];
+ netcp->tx_pool_region_id = temp[1];
+
+ if (netcp->tx_pool_size < MAX_SKB_FRAGS) {
+ dev_err(dev, "tx-pool size too small, must be at least %ld\n",
+ MAX_SKB_FRAGS);
+ ret = -ENODEV;
+ goto quit;
+ }
+
+ ret = of_property_read_u32(node_interface, "tx-completion-queue",
+ &netcp->tx_compl_qid);
+ if (ret < 0) {
+ dev_warn(dev, "missing \"tx-completion-queue\" parameter\n");
+ netcp->tx_compl_qid = KNAV_QUEUE_QPEND;
+ }
+
+ /* NAPI register */
+ netif_napi_add(ndev, &netcp->rx_napi, netcp_rx_poll);
+ netif_napi_add_tx(ndev, &netcp->tx_napi, netcp_tx_poll);
+
+ /* Register the network device */
+ ndev->dev_id = 0;
+ ndev->watchdog_timeo = NETCP_TX_TIMEOUT;
+ ndev->netdev_ops = &netcp_netdev_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ list_add_tail(&netcp->interface_list, &netcp_device->interface_head);
+ return 0;
+
+quit:
+ free_netdev(ndev);
+ return ret;
+}
+
+static void netcp_delete_interface(struct netcp_device *netcp_device,
+ struct net_device *ndev)
+{
+ struct netcp_intf_modpriv *intf_modpriv, *tmp;
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct netcp_module *module;
+
+ dev_dbg(netcp_device->device, "Removing interface \"%s\"\n",
+ ndev->name);
+
+ /* Notify each of the modules that the interface is going away */
+ list_for_each_entry_safe(intf_modpriv, tmp, &netcp->module_head,
+ intf_list) {
+ module = intf_modpriv->netcp_module;
+ dev_dbg(netcp_device->device, "Releasing module \"%s\"\n",
+ module->name);
+ if (module->release)
+ module->release(intf_modpriv->module_priv);
+ list_del(&intf_modpriv->intf_list);
+ }
+ WARN(!list_empty(&netcp->module_head), "%s interface module list is not empty!\n",
+ ndev->name);
+
+ list_del(&netcp->interface_list);
+
+ of_node_put(netcp->node_interface);
+ unregister_netdev(ndev);
+ free_netdev(ndev);
+}
+
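+/* Probe one NETCP instance: allocate the netcp_device, create an
+ * interface for every available "netcp-interfaces" child node and then
+ * probe any modules that have already been registered against the new
+ * device. The probe is deferred (-EPROBE_DEFER) until the Knav DMA and
+ * QMSS subsystems are ready.
+ */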
+static int netcp_probe(struct platform_device *pdev)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct netcp_intf *netcp_intf, *netcp_tmp;
+ struct device_node *child, *interfaces;
+ struct netcp_device *netcp_device;
+ struct device *dev = &pdev->dev;
+ struct netcp_module *module;
+ int ret;
+
+ if (!knav_dma_device_ready() ||
+ !knav_qmss_device_ready())
+ return -EPROBE_DEFER;
+
+ if (!node) {
+ dev_err(dev, "could not find device info\n");
+ return -ENODEV;
+ }
+
+ /* Allocate a new NETCP device instance */
+ netcp_device = devm_kzalloc(dev, sizeof(*netcp_device), GFP_KERNEL);
+ if (!netcp_device)
+ return -ENOMEM;
+
+ pm_runtime_enable(&pdev->dev);
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable NETCP power-domain\n");
+ pm_runtime_disable(&pdev->dev);
+ return ret;
+ }
+
+ /* Initialize the NETCP device instance */
+ INIT_LIST_HEAD(&netcp_device->interface_head);
+ INIT_LIST_HEAD(&netcp_device->modpriv_head);
+ netcp_device->device = dev;
+ platform_set_drvdata(pdev, netcp_device);
+
+ /* create interfaces */
+ interfaces = of_get_child_by_name(node, "netcp-interfaces");
+ if (!interfaces) {
+ dev_err(dev, "could not find netcp-interfaces node\n");
+ ret = -ENODEV;
+ goto probe_quit;
+ }
+
+ for_each_available_child_of_node(interfaces, child) {
+ ret = netcp_create_interface(netcp_device, child);
+ if (ret) {
+ dev_err(dev, "could not create interface(%pOFn)\n",
+ child);
+ goto probe_quit_interface;
+ }
+ }
+
+ of_node_put(interfaces);
+
+ /* Add the device instance to the list */
+ list_add_tail(&netcp_device->device_list, &netcp_devices);
+
+ /* Probe & attach any modules already registered */
+ mutex_lock(&netcp_modules_lock);
+ for_each_netcp_module(module) {
+ ret = netcp_module_probe(netcp_device, module);
+ if (ret < 0)
+ dev_err(dev, "module(%s) probe failed\n", module->name);
+ }
+ mutex_unlock(&netcp_modules_lock);
+ return 0;
+
+probe_quit_interface:
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ netcp_delete_interface(netcp_device, netcp_intf->ndev);
+ }
+
+ of_node_put(interfaces);
+
+probe_quit:
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return ret;
+}
+
+static int netcp_remove(struct platform_device *pdev)
+{
+ struct netcp_device *netcp_device = platform_get_drvdata(pdev);
+ struct netcp_intf *netcp_intf, *netcp_tmp;
+ struct netcp_inst_modpriv *inst_modpriv, *tmp;
+ struct netcp_module *module;
+
+ list_for_each_entry_safe(inst_modpriv, tmp, &netcp_device->modpriv_head,
+ inst_list) {
+ module = inst_modpriv->netcp_module;
+ dev_dbg(&pdev->dev, "Removing module \"%s\"\n", module->name);
+ module->remove(netcp_device, inst_modpriv->module_priv);
+ list_del(&inst_modpriv->inst_list);
+ }
+
+ /* now that all modules are removed, clean up the interfaces */
+ list_for_each_entry_safe(netcp_intf, netcp_tmp,
+ &netcp_device->interface_head,
+ interface_list) {
+ netcp_delete_interface(netcp_device, netcp_intf->ndev);
+ }
+
+ WARN(!list_empty(&netcp_device->interface_head),
+ "%s interface list not empty!\n", pdev->name);
+
+ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ platform_set_drvdata(pdev, NULL);
+ return 0;
+}
+
+static const struct of_device_id of_match[] = {
+ { .compatible = "ti,netcp-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_match);
+
+static struct platform_driver netcp_driver = {
+ .driver = {
+ .name = "netcp-1.0",
+ .of_match_table = of_match,
+ },
+ .probe = netcp_probe,
+ .remove = netcp_remove,
+};
+module_platform_driver(netcp_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com");
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
new file mode 100644
index 000000000..751fb0bc6
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -0,0 +1,3878 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Keystone GBE and XGBE subsystem code
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Cyril Chemparathy <cyril@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/if_vlan.h>
+#include <linux/ptp_classify.h>
+#include <linux/net_tstamp.h>
+#include <linux/ethtool.h>
+
+#include "cpsw.h"
+#include "cpsw_ale.h"
+#include "netcp.h"
+#include "cpts.h"
+
+#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
+#define NETCP_DRIVER_VERSION "v1.0"
+
+#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
+#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
+#define GBE_MINOR_VERSION(reg) (reg & 0xff)
+#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
+
+/* 1G Ethernet SS defines */
+#define GBE_MODULE_NAME "netcp-gbe"
+#define GBE_SS_VERSION_14 0x4ed2
+
+#define GBE_SS_REG_INDEX 0
+#define GBE_SGMII34_REG_INDEX 1
+#define GBE_SM_REG_INDEX 2
+/* offset relative to base of GBE_SS_REG_INDEX */
+#define GBE13_SGMII_MODULE_OFFSET 0x100
+/* offset relative to base of GBE_SM_REG_INDEX */
+#define GBE13_HOST_PORT_OFFSET 0x34
+#define GBE13_SLAVE_PORT_OFFSET 0x60
+#define GBE13_EMAC_OFFSET 0x100
+#define GBE13_SLAVE_PORT2_OFFSET 0x200
+#define GBE13_HW_STATS_OFFSET 0x300
+#define GBE13_CPTS_OFFSET 0x500
+#define GBE13_ALE_OFFSET 0x600
+#define GBE13_HOST_PORT_NUM 0
+
+/* 1G Ethernet NU SS defines */
+#define GBENU_MODULE_NAME "netcp-gbenu"
+#define GBE_SS_ID_NU 0x4ee6
+#define GBE_SS_ID_2U 0x4ee8
+
+#define IS_SS_ID_MU(d) \
+ ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
+
+#define IS_SS_ID_NU(d) \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
+
+#define IS_SS_ID_VER_14(d) \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_VERSION_14)
+#define IS_SS_ID_2U(d) \
+ (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U)
+
+#define GBENU_SS_REG_INDEX 0
+#define GBENU_SM_REG_INDEX 1
+#define GBENU_SGMII_MODULE_OFFSET 0x100
+#define GBENU_HOST_PORT_OFFSET 0x1000
+#define GBENU_SLAVE_PORT_OFFSET 0x2000
+#define GBENU_EMAC_OFFSET 0x2330
+#define GBENU_HW_STATS_OFFSET 0x1a000
+#define GBENU_CPTS_OFFSET 0x1d000
+#define GBENU_ALE_OFFSET 0x1e000
+#define GBENU_HOST_PORT_NUM 0
+#define GBENU_SGMII_MODULE_SIZE 0x100
+
+/* 10G Ethernet SS defines */
+#define XGBE_MODULE_NAME "netcp-xgbe"
+#define XGBE_SS_VERSION_10 0x4ee4
+
+#define XGBE_SS_REG_INDEX 0
+#define XGBE_SM_REG_INDEX 1
+#define XGBE_SERDES_REG_INDEX 2
+
+/* offset relative to base of XGBE_SS_REG_INDEX */
+#define XGBE10_SGMII_MODULE_OFFSET 0x100
+#define IS_SS_ID_XGBE(d) ((d)->ss_version == XGBE_SS_VERSION_10)
+/* offset relative to base of XGBE_SM_REG_INDEX */
+#define XGBE10_HOST_PORT_OFFSET 0x34
+#define XGBE10_SLAVE_PORT_OFFSET 0x64
+#define XGBE10_EMAC_OFFSET 0x400
+#define XGBE10_CPTS_OFFSET 0x600
+#define XGBE10_ALE_OFFSET 0x700
+#define XGBE10_HW_STATS_OFFSET 0x800
+#define XGBE10_HOST_PORT_NUM 0
+
+#define GBE_TIMER_INTERVAL (HZ / 2)
+
+/* Soft reset register values */
+#define SOFT_RESET_MASK BIT(0)
+#define SOFT_RESET BIT(0)
+#define DEVICE_EMACSL_RESET_POLL_COUNT 100
+#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
+
+#define MACSL_RX_ENABLE_CSF BIT(23)
+#define MACSL_ENABLE_EXT_CTL BIT(18)
+#define MACSL_XGMII_ENABLE BIT(13)
+#define MACSL_XGIG_MODE BIT(8)
+#define MACSL_GIG_MODE BIT(7)
+#define MACSL_GMII_ENABLE BIT(5)
+#define MACSL_FULLDUPLEX BIT(0)
+
+#define GBE_CTL_P0_ENABLE BIT(2)
+#define ETH_SW_CTL_P0_TX_CRC_REMOVE BIT(13)
+#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
+#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
+#define GBE_STATS_CD_SEL BIT(28)
+
+#define GBE_PORT_MASK(x) (BIT(x) - 1)
+#define GBE_MASK_NO_PORTS 0
+
+#define GBE_DEF_1G_MAC_CONTROL \
+ (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
+ MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
+
+#define GBE_DEF_10G_MAC_CONTROL \
+ (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
+ MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
+
+#define GBE_STATSA_MODULE 0
+#define GBE_STATSB_MODULE 1
+#define GBE_STATSC_MODULE 2
+#define GBE_STATSD_MODULE 3
+
+#define GBENU_STATS0_MODULE 0
+#define GBENU_STATS1_MODULE 1
+#define GBENU_STATS2_MODULE 2
+#define GBENU_STATS3_MODULE 3
+#define GBENU_STATS4_MODULE 4
+#define GBENU_STATS5_MODULE 5
+#define GBENU_STATS6_MODULE 6
+#define GBENU_STATS7_MODULE 7
+#define GBENU_STATS8_MODULE 8
+
+#define XGBE_STATS0_MODULE 0
+#define XGBE_STATS1_MODULE 1
+#define XGBE_STATS2_MODULE 2
+
+/* s: 0-based slave_port */
+#define SGMII_BASE(d, s) \
+ (((s) < 2) ? (d)->sgmii_port_regs : (d)->sgmii_port34_regs)
+
+#define GBE_TX_QUEUE 648
+#define GBE_TXHOOK_ORDER 0
+#define GBE_RXHOOK_ORDER 0
+#define GBE_DEFAULT_ALE_AGEOUT 30
+#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
+#define SLAVE_LINK_IS_RGMII(s) \
+ (((s)->link_interface >= RGMII_LINK_MAC_PHY) && \
+ ((s)->link_interface <= RGMII_LINK_MAC_PHY_NO_MDIO))
+#define SLAVE_LINK_IS_SGMII(s) \
+ ((s)->link_interface <= SGMII_LINK_MAC_PHY_NO_MDIO)
+#define NETCP_LINK_STATE_INVALID -1
+
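+/* The GBE (v1.4), GBENU and XGBE subsystems use different register
+ * layouts. Per-block register offsets are therefore captured in the
+ * *_regs_ofs structures at probe time using the GBE/GBENU/XGBE_SET_REG_OFS()
+ * helpers below, and registers are then accessed uniformly through
+ * GBE_REG_ADDR().
+ */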
+#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+ offsetof(struct gbe##_##rb, rn)
+#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+ offsetof(struct gbenu##_##rb, rn)
+#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
+ offsetof(struct xgbe##_##rb, rn)
+#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
+
+#define HOST_TX_PRI_MAP_DEFAULT 0x00000000
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+/* Px_TS_CTL register fields */
+#define TS_RX_ANX_F_EN BIT(0)
+#define TS_RX_VLAN_LT1_EN BIT(1)
+#define TS_RX_VLAN_LT2_EN BIT(2)
+#define TS_RX_ANX_D_EN BIT(3)
+#define TS_TX_ANX_F_EN BIT(4)
+#define TS_TX_VLAN_LT1_EN BIT(5)
+#define TS_TX_VLAN_LT2_EN BIT(6)
+#define TS_TX_ANX_D_EN BIT(7)
+#define TS_LT2_EN BIT(8)
+#define TS_RX_ANX_E_EN BIT(9)
+#define TS_TX_ANX_E_EN BIT(10)
+#define TS_MSG_TYPE_EN_SHIFT 16
+#define TS_MSG_TYPE_EN_MASK 0xffff
+
+/* Px_TS_SEQ_LTYPE register fields */
+#define TS_SEQ_ID_OFS_SHIFT 16
+#define TS_SEQ_ID_OFS_MASK 0x3f
+
+/* Px_TS_CTL_LTYPE2 register fields */
+#define TS_107 BIT(16)
+#define TS_129 BIT(17)
+#define TS_130 BIT(18)
+#define TS_131 BIT(19)
+#define TS_132 BIT(20)
+#define TS_319 BIT(21)
+#define TS_320 BIT(22)
+#define TS_TTL_NONZERO BIT(23)
+#define TS_UNI_EN BIT(24)
+#define TS_UNI_EN_SHIFT 24
+
+#define TS_TX_ANX_ALL_EN \
+ (TS_TX_ANX_D_EN | TS_TX_ANX_E_EN | TS_TX_ANX_F_EN)
+
+#define TS_RX_ANX_ALL_EN \
+ (TS_RX_ANX_D_EN | TS_RX_ANX_E_EN | TS_RX_ANX_F_EN)
+
+#define TS_CTL_DST_PORT TS_319
+#define TS_CTL_DST_PORT_SHIFT 21
+
+#define TS_CTL_MADDR_ALL \
+ (TS_107 | TS_129 | TS_130 | TS_131 | TS_132)
+
+#define TS_CTL_MADDR_SHIFT 16
+
+/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
+#define EVENT_MSG_BITS (BIT(0) | BIT(1) | BIT(2) | BIT(3))
+#endif /* CONFIG_TI_CPTS */
+
+struct xgbe_ss_regs {
+ u32 id_ver;
+ u32 synce_count;
+ u32 synce_mux;
+ u32 control;
+};
+
+struct xgbe_switch_regs {
+ u32 id_ver;
+ u32 control;
+ u32 emcontrol;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+ u32 cppi_thresh;
+};
+
+struct xgbe_port_regs {
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 sa_lo;
+ u32 sa_hi;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 ts_ctl_ltype2;
+ u32 ts_ctl2;
+ u32 control;
+};
+
+struct xgbe_host_port_regs {
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 src_id;
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+};
+
+struct xgbe_emac_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 em_control;
+ u32 __reserved_1;
+ u32 tx_gap;
+ u32 rsvd[4];
+};
+
+struct xgbe_host_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 __rsvd_0[3];
+ u32 rx_oversized_frames;
+ u32 __rsvd_1;
+ u32 rx_undersized_frames;
+ u32 __rsvd_2;
+ u32 overrun_type4;
+ u32 overrun_type5;
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 __rsvd_3[9];
+ u32 tx_bytes;
+ u32 tx_64byte_frames;
+ u32 tx_65_to_127byte_frames;
+ u32 tx_128_to_255byte_frames;
+ u32 tx_256_to_511byte_frames;
+ u32 tx_512_to_1023byte_frames;
+ u32 tx_1024byte_frames;
+ u32 net_bytes;
+ u32 rx_sof_overruns;
+ u32 rx_mof_overruns;
+ u32 rx_dma_overruns;
+};
+
+struct xgbe_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors;
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames;
+ u32 rx_undersized_frames;
+ u32 rx_fragments;
+ u32 overrun_type4;
+ u32 overrun_type5;
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_deferred_frames;
+ u32 tx_collision_frames;
+ u32 tx_single_coll_frames;
+ u32 tx_mult_coll_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_underrun;
+ u32 tx_carrier_sense_errors;
+ u32 tx_bytes;
+ u32 tx_64byte_frames;
+ u32 tx_65_to_127byte_frames;
+ u32 tx_128_to_255byte_frames;
+ u32 tx_256_to_511byte_frames;
+ u32 tx_512_to_1023byte_frames;
+ u32 tx_1024byte_frames;
+ u32 net_bytes;
+ u32 rx_sof_overruns;
+ u32 rx_mof_overruns;
+ u32 rx_dma_overruns;
+};
+
+struct gbenu_ss_regs {
+ u32 id_ver;
+ u32 synce_count; /* NU */
+ u32 synce_mux; /* NU */
+ u32 control; /* 2U */
+ u32 __rsvd_0[2]; /* 2U */
+ u32 rgmii_status; /* 2U */
+ u32 ss_status; /* 2U */
+};
+
+struct gbenu_switch_regs {
+ u32 id_ver;
+ u32 control;
+ u32 __rsvd_0[2];
+ u32 emcontrol;
+ u32 stat_port_en;
+ u32 ptype; /* NU */
+ u32 soft_idle;
+ u32 thru_rate; /* NU */
+ u32 gap_thresh; /* NU */
+ u32 tx_start_wds; /* NU */
+ u32 eee_prescale; /* 2U */
+ u32 tx_g_oflow_thresh_set; /* NU */
+ u32 tx_g_oflow_thresh_clr; /* NU */
+ u32 tx_g_buf_thresh_set_l; /* NU */
+ u32 tx_g_buf_thresh_set_h; /* NU */
+ u32 tx_g_buf_thresh_clr_l; /* NU */
+ u32 tx_g_buf_thresh_clr_h; /* NU */
+};
+
+struct gbenu_port_regs {
+ u32 __rsvd_0;
+ u32 control;
+ u32 max_blks; /* 2U */
+ u32 mem_align1;
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map; /* NU */
+ u32 pri_ctl; /* 2U */
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+ u32 tx_blks_pri; /* NU */
+ u32 __rsvd_1;
+ u32 idle2lpi; /* 2U */
+ u32 lpi2idle; /* 2U */
+ u32 eee_status; /* 2U */
+ u32 __rsvd_2;
+ u32 __rsvd_3[176]; /* NU: more to add */
+ u32 __rsvd_4[2];
+ u32 sa_lo;
+ u32 sa_hi;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 ts_ctl_ltype2;
+ u32 ts_ctl2;
+};
+
+struct gbenu_host_port_regs {
+ u32 __rsvd_0;
+ u32 control;
+ u32 flow_id_offset; /* 2U */
+ u32 __rsvd_1;
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map; /* NU */
+ u32 pri_ctl;
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+ u32 tx_blks_pri; /* NU */
+ u32 __rsvd_2;
+ u32 idle2lpi; /* 2U */
+ u32 lpi2wake; /* 2U */
+ u32 eee_status; /* 2U */
+ u32 __rsvd_3;
+ u32 __rsvd_4[184]; /* NU */
+ u32 host_blks_pri; /* NU */
+};
+
+struct gbenu_emac_regs {
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 boff_test;
+ u32 rx_pause;
+ u32 __rsvd_0[11]; /* NU */
+ u32 tx_pause;
+ u32 __rsvd_1[11]; /* NU */
+ u32 em_control;
+ u32 tx_gap;
+};
+
+/* Some hw stat regs are applicable only to the slave ports; this is
+ * handled via the gbenu_et_stats table below. In addition, some stats
+ * exist only on SS version NU and some only on 2U.
+ */
+struct gbenu_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames; /* slave */
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors; /* slave */
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames; /* slave */
+ u32 rx_undersized_frames;
+ u32 rx_fragments; /* slave */
+ u32 ale_drop;
+ u32 ale_overrun_drop;
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames; /* slave */
+ u32 tx_deferred_frames; /* slave */
+ u32 tx_collision_frames; /* slave */
+ u32 tx_single_coll_frames; /* slave */
+ u32 tx_mult_coll_frames; /* slave */
+ u32 tx_excessive_collisions; /* slave */
+ u32 tx_late_collisions; /* slave */
+ u32 rx_ipg_error; /* slave 10G only */
+ u32 tx_carrier_sense_errors; /* slave */
+ u32 tx_bytes;
+ u32 tx_64B_frames;
+ u32 tx_65_to_127B_frames;
+ u32 tx_128_to_255B_frames;
+ u32 tx_256_to_511B_frames;
+ u32 tx_512_to_1023B_frames;
+ u32 tx_1024B_frames;
+ u32 net_bytes;
+ u32 rx_bottom_fifo_drop;
+ u32 rx_port_mask_drop;
+ u32 rx_top_fifo_drop;
+ u32 ale_rate_limit_drop;
+ u32 ale_vid_ingress_drop;
+ u32 ale_da_eq_sa_drop;
+ u32 __rsvd_0[3];
+ u32 ale_unknown_ucast;
+ u32 ale_unknown_ucast_bytes;
+ u32 ale_unknown_mcast;
+ u32 ale_unknown_mcast_bytes;
+ u32 ale_unknown_bcast;
+ u32 ale_unknown_bcast_bytes;
+ u32 ale_pol_match;
+ u32 ale_pol_match_red; /* NU */
+ u32 ale_pol_match_yellow; /* NU */
+ u32 __rsvd_1[44];
+ u32 tx_mem_protect_err;
+ /* following NU only */
+ u32 tx_pri0;
+ u32 tx_pri1;
+ u32 tx_pri2;
+ u32 tx_pri3;
+ u32 tx_pri4;
+ u32 tx_pri5;
+ u32 tx_pri6;
+ u32 tx_pri7;
+ u32 tx_pri0_bcnt;
+ u32 tx_pri1_bcnt;
+ u32 tx_pri2_bcnt;
+ u32 tx_pri3_bcnt;
+ u32 tx_pri4_bcnt;
+ u32 tx_pri5_bcnt;
+ u32 tx_pri6_bcnt;
+ u32 tx_pri7_bcnt;
+ u32 tx_pri0_drop;
+ u32 tx_pri1_drop;
+ u32 tx_pri2_drop;
+ u32 tx_pri3_drop;
+ u32 tx_pri4_drop;
+ u32 tx_pri5_drop;
+ u32 tx_pri6_drop;
+ u32 tx_pri7_drop;
+ u32 tx_pri0_drop_bcnt;
+ u32 tx_pri1_drop_bcnt;
+ u32 tx_pri2_drop_bcnt;
+ u32 tx_pri3_drop_bcnt;
+ u32 tx_pri4_drop_bcnt;
+ u32 tx_pri5_drop_bcnt;
+ u32 tx_pri6_drop_bcnt;
+ u32 tx_pri7_drop_bcnt;
+};
+
+#define GBENU_HW_STATS_REG_MAP_SZ 0x200
+
+struct gbe_ss_regs {
+ u32 id_ver;
+ u32 synce_count;
+ u32 synce_mux;
+};
+
+struct gbe_ss_regs_ofs {
+ u16 id_ver;
+ u16 control;
+ u16 rgmii_status; /* 2U */
+};
+
+struct gbe_switch_regs {
+ u32 id_ver;
+ u32 control;
+ u32 soft_reset;
+ u32 stat_port_en;
+ u32 ptype;
+ u32 soft_idle;
+ u32 thru_rate;
+ u32 gap_thresh;
+ u32 tx_start_wds;
+ u32 flow_control;
+};
+
+struct gbe_switch_regs_ofs {
+ u16 id_ver;
+ u16 control;
+ u16 soft_reset;
+ u16 emcontrol;
+ u16 stat_port_en;
+ u16 ptype;
+ u16 flow_control;
+};
+
+struct gbe_port_regs {
+ u32 max_blks;
+ u32 blk_cnt;
+ u32 port_vlan;
+ u32 tx_pri_map;
+ u32 sa_lo;
+ u32 sa_hi;
+ u32 ts_ctl;
+ u32 ts_seq_ltype;
+ u32 ts_vlan;
+ u32 ts_ctl_ltype2;
+ u32 ts_ctl2;
+};
+
+struct gbe_port_regs_ofs {
+ u16 port_vlan;
+ u16 tx_pri_map;
+ u16 rx_pri_map;
+ u16 sa_lo;
+ u16 sa_hi;
+ u16 ts_ctl;
+ u16 ts_seq_ltype;
+ u16 ts_vlan;
+ u16 ts_ctl_ltype2;
+ u16 ts_ctl2;
+ u16 rx_maxlen; /* 2U, NU */
+};
+
+struct gbe_host_port_regs {
+ u32 src_id;
+ u32 port_vlan;
+ u32 rx_pri_map;
+ u32 rx_maxlen;
+};
+
+struct gbe_host_port_regs_ofs {
+ u16 port_vlan;
+ u16 tx_pri_map;
+ u16 rx_maxlen;
+};
+
+struct gbe_emac_regs {
+ u32 id_ver;
+ u32 mac_control;
+ u32 mac_status;
+ u32 soft_reset;
+ u32 rx_maxlen;
+ u32 __reserved_0;
+ u32 rx_pause;
+ u32 tx_pause;
+ u32 __reserved_1;
+ u32 rx_pri_map;
+ u32 rsvd[6];
+};
+
+struct gbe_emac_regs_ofs {
+ u16 mac_control;
+ u16 soft_reset;
+ u16 rx_maxlen;
+};
+
+struct gbe_hw_stats {
+ u32 rx_good_frames;
+ u32 rx_broadcast_frames;
+ u32 rx_multicast_frames;
+ u32 rx_pause_frames;
+ u32 rx_crc_errors;
+ u32 rx_align_code_errors;
+ u32 rx_oversized_frames;
+ u32 rx_jabber_frames;
+ u32 rx_undersized_frames;
+ u32 rx_fragments;
+ u32 __pad_0[2];
+ u32 rx_bytes;
+ u32 tx_good_frames;
+ u32 tx_broadcast_frames;
+ u32 tx_multicast_frames;
+ u32 tx_pause_frames;
+ u32 tx_deferred_frames;
+ u32 tx_collision_frames;
+ u32 tx_single_coll_frames;
+ u32 tx_mult_coll_frames;
+ u32 tx_excessive_collisions;
+ u32 tx_late_collisions;
+ u32 tx_underrun;
+ u32 tx_carrier_sense_errors;
+ u32 tx_bytes;
+ u32 tx_64byte_frames;
+ u32 tx_65_to_127byte_frames;
+ u32 tx_128_to_255byte_frames;
+ u32 tx_256_to_511byte_frames;
+ u32 tx_512_to_1023byte_frames;
+ u32 tx_1024byte_frames;
+ u32 net_bytes;
+ u32 rx_sof_overruns;
+ u32 rx_mof_overruns;
+ u32 rx_dma_overruns;
+};
+
+#define GBE_MAX_HW_STAT_MODS 9
+#define GBE_HW_STATS_REG_MAP_SZ 0x100
+
+struct ts_ctl {
+ int uni;
+ u8 dst_port_map;
+ u8 maddr_map;
+ u8 ts_mcast_type;
+};
+
+struct gbe_slave {
+ void __iomem *port_regs;
+ void __iomem *emac_regs;
+ struct gbe_port_regs_ofs port_regs_ofs;
+ struct gbe_emac_regs_ofs emac_regs_ofs;
+ int slave_num; /* 0 based logical number */
+ int port_num; /* actual port number */
+ atomic_t link_state;
+ bool open;
+ struct phy_device *phy;
+ u32 link_interface;
+ u32 mac_control;
+ u8 phy_port_t;
+ struct device_node *node;
+ struct device_node *phy_node;
+ struct ts_ctl ts_ctl;
+ struct list_head slave_list;
+};
+
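+/* State for one GBE/XGBE switch subsystem instance. One gbe_slave exists
+ * per slave (external) port, and one gbe_intf ties a NETCP net_device to
+ * the slave port that backs it.
+ */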
+struct gbe_priv {
+ struct device *dev;
+ struct netcp_device *netcp_device;
+ struct timer_list timer;
+ u32 num_slaves;
+ u32 ale_ports;
+ bool enable_ale;
+ u8 max_num_slaves;
+ u8 max_num_ports; /* max_num_slaves + 1 */
+ u8 num_stats_mods;
+ struct netcp_tx_pipe tx_pipe;
+
+ int host_port;
+ u32 rx_packet_max;
+ u32 ss_version;
+ u32 stats_en_mask;
+
+ void __iomem *ss_regs;
+ void __iomem *switch_regs;
+ void __iomem *host_port_regs;
+ void __iomem *ale_reg;
+ void __iomem *cpts_reg;
+ void __iomem *sgmii_port_regs;
+ void __iomem *sgmii_port34_regs;
+ void __iomem *xgbe_serdes_regs;
+ void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
+
+ struct gbe_ss_regs_ofs ss_regs_ofs;
+ struct gbe_switch_regs_ofs switch_regs_ofs;
+ struct gbe_host_port_regs_ofs host_port_regs_ofs;
+
+ struct cpsw_ale *ale;
+ unsigned int tx_queue_id;
+ const char *dma_chan_name;
+
+ struct list_head gbe_intf_head;
+ struct list_head secondary_slaves;
+ struct net_device *dummy_ndev;
+
+ u64 *hw_stats;
+ u32 *hw_stats_prev;
+ const struct netcp_ethtool_stat *et_stats;
+ int num_et_stats;
+ /* Lock for updating the hwstats */
+ spinlock_t hw_stats_lock;
+
+ int cpts_registered;
+ struct cpts *cpts;
+ int rx_ts_enabled;
+ int tx_ts_enabled;
+};
+
+struct gbe_intf {
+ struct net_device *ndev;
+ struct device *dev;
+ struct gbe_priv *gbe_dev;
+ struct netcp_tx_pipe tx_pipe;
+ struct gbe_slave *slave;
+ struct list_head gbe_intf_list;
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+};
+
+static struct netcp_module gbe_module;
+static struct netcp_module xgbe_module;
+
+/* Statistic management */
+struct netcp_ethtool_stat {
+ char desc[ETH_GSTRING_LEN];
+ int type;
+ u32 size;
+ int offset;
+};
+
+#define GBE_STATSA_INFO(field) \
+{ \
+ "GBE_A:"#field, GBE_STATSA_MODULE, \
+ sizeof_field(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
+
+#define GBE_STATSB_INFO(field) \
+{ \
+ "GBE_B:"#field, GBE_STATSB_MODULE, \
+ sizeof_field(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
+
+#define GBE_STATSC_INFO(field) \
+{ \
+ "GBE_C:"#field, GBE_STATSC_MODULE, \
+ sizeof_field(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
+
+#define GBE_STATSD_INFO(field) \
+{ \
+ "GBE_D:"#field, GBE_STATSD_MODULE, \
+ sizeof_field(struct gbe_hw_stats, field), \
+ offsetof(struct gbe_hw_stats, field) \
+}
+
+static const struct netcp_ethtool_stat gbe13_et_stats[] = {
+ /* GBE module A */
+ GBE_STATSA_INFO(rx_good_frames),
+ GBE_STATSA_INFO(rx_broadcast_frames),
+ GBE_STATSA_INFO(rx_multicast_frames),
+ GBE_STATSA_INFO(rx_pause_frames),
+ GBE_STATSA_INFO(rx_crc_errors),
+ GBE_STATSA_INFO(rx_align_code_errors),
+ GBE_STATSA_INFO(rx_oversized_frames),
+ GBE_STATSA_INFO(rx_jabber_frames),
+ GBE_STATSA_INFO(rx_undersized_frames),
+ GBE_STATSA_INFO(rx_fragments),
+ GBE_STATSA_INFO(rx_bytes),
+ GBE_STATSA_INFO(tx_good_frames),
+ GBE_STATSA_INFO(tx_broadcast_frames),
+ GBE_STATSA_INFO(tx_multicast_frames),
+ GBE_STATSA_INFO(tx_pause_frames),
+ GBE_STATSA_INFO(tx_deferred_frames),
+ GBE_STATSA_INFO(tx_collision_frames),
+ GBE_STATSA_INFO(tx_single_coll_frames),
+ GBE_STATSA_INFO(tx_mult_coll_frames),
+ GBE_STATSA_INFO(tx_excessive_collisions),
+ GBE_STATSA_INFO(tx_late_collisions),
+ GBE_STATSA_INFO(tx_underrun),
+ GBE_STATSA_INFO(tx_carrier_sense_errors),
+ GBE_STATSA_INFO(tx_bytes),
+ GBE_STATSA_INFO(tx_64byte_frames),
+ GBE_STATSA_INFO(tx_65_to_127byte_frames),
+ GBE_STATSA_INFO(tx_128_to_255byte_frames),
+ GBE_STATSA_INFO(tx_256_to_511byte_frames),
+ GBE_STATSA_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSA_INFO(tx_1024byte_frames),
+ GBE_STATSA_INFO(net_bytes),
+ GBE_STATSA_INFO(rx_sof_overruns),
+ GBE_STATSA_INFO(rx_mof_overruns),
+ GBE_STATSA_INFO(rx_dma_overruns),
+ /* GBE module B */
+ GBE_STATSB_INFO(rx_good_frames),
+ GBE_STATSB_INFO(rx_broadcast_frames),
+ GBE_STATSB_INFO(rx_multicast_frames),
+ GBE_STATSB_INFO(rx_pause_frames),
+ GBE_STATSB_INFO(rx_crc_errors),
+ GBE_STATSB_INFO(rx_align_code_errors),
+ GBE_STATSB_INFO(rx_oversized_frames),
+ GBE_STATSB_INFO(rx_jabber_frames),
+ GBE_STATSB_INFO(rx_undersized_frames),
+ GBE_STATSB_INFO(rx_fragments),
+ GBE_STATSB_INFO(rx_bytes),
+ GBE_STATSB_INFO(tx_good_frames),
+ GBE_STATSB_INFO(tx_broadcast_frames),
+ GBE_STATSB_INFO(tx_multicast_frames),
+ GBE_STATSB_INFO(tx_pause_frames),
+ GBE_STATSB_INFO(tx_deferred_frames),
+ GBE_STATSB_INFO(tx_collision_frames),
+ GBE_STATSB_INFO(tx_single_coll_frames),
+ GBE_STATSB_INFO(tx_mult_coll_frames),
+ GBE_STATSB_INFO(tx_excessive_collisions),
+ GBE_STATSB_INFO(tx_late_collisions),
+ GBE_STATSB_INFO(tx_underrun),
+ GBE_STATSB_INFO(tx_carrier_sense_errors),
+ GBE_STATSB_INFO(tx_bytes),
+ GBE_STATSB_INFO(tx_64byte_frames),
+ GBE_STATSB_INFO(tx_65_to_127byte_frames),
+ GBE_STATSB_INFO(tx_128_to_255byte_frames),
+ GBE_STATSB_INFO(tx_256_to_511byte_frames),
+ GBE_STATSB_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSB_INFO(tx_1024byte_frames),
+ GBE_STATSB_INFO(net_bytes),
+ GBE_STATSB_INFO(rx_sof_overruns),
+ GBE_STATSB_INFO(rx_mof_overruns),
+ GBE_STATSB_INFO(rx_dma_overruns),
+ /* GBE module C */
+ GBE_STATSC_INFO(rx_good_frames),
+ GBE_STATSC_INFO(rx_broadcast_frames),
+ GBE_STATSC_INFO(rx_multicast_frames),
+ GBE_STATSC_INFO(rx_pause_frames),
+ GBE_STATSC_INFO(rx_crc_errors),
+ GBE_STATSC_INFO(rx_align_code_errors),
+ GBE_STATSC_INFO(rx_oversized_frames),
+ GBE_STATSC_INFO(rx_jabber_frames),
+ GBE_STATSC_INFO(rx_undersized_frames),
+ GBE_STATSC_INFO(rx_fragments),
+ GBE_STATSC_INFO(rx_bytes),
+ GBE_STATSC_INFO(tx_good_frames),
+ GBE_STATSC_INFO(tx_broadcast_frames),
+ GBE_STATSC_INFO(tx_multicast_frames),
+ GBE_STATSC_INFO(tx_pause_frames),
+ GBE_STATSC_INFO(tx_deferred_frames),
+ GBE_STATSC_INFO(tx_collision_frames),
+ GBE_STATSC_INFO(tx_single_coll_frames),
+ GBE_STATSC_INFO(tx_mult_coll_frames),
+ GBE_STATSC_INFO(tx_excessive_collisions),
+ GBE_STATSC_INFO(tx_late_collisions),
+ GBE_STATSC_INFO(tx_underrun),
+ GBE_STATSC_INFO(tx_carrier_sense_errors),
+ GBE_STATSC_INFO(tx_bytes),
+ GBE_STATSC_INFO(tx_64byte_frames),
+ GBE_STATSC_INFO(tx_65_to_127byte_frames),
+ GBE_STATSC_INFO(tx_128_to_255byte_frames),
+ GBE_STATSC_INFO(tx_256_to_511byte_frames),
+ GBE_STATSC_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSC_INFO(tx_1024byte_frames),
+ GBE_STATSC_INFO(net_bytes),
+ GBE_STATSC_INFO(rx_sof_overruns),
+ GBE_STATSC_INFO(rx_mof_overruns),
+ GBE_STATSC_INFO(rx_dma_overruns),
+ /* GBE module D */
+ GBE_STATSD_INFO(rx_good_frames),
+ GBE_STATSD_INFO(rx_broadcast_frames),
+ GBE_STATSD_INFO(rx_multicast_frames),
+ GBE_STATSD_INFO(rx_pause_frames),
+ GBE_STATSD_INFO(rx_crc_errors),
+ GBE_STATSD_INFO(rx_align_code_errors),
+ GBE_STATSD_INFO(rx_oversized_frames),
+ GBE_STATSD_INFO(rx_jabber_frames),
+ GBE_STATSD_INFO(rx_undersized_frames),
+ GBE_STATSD_INFO(rx_fragments),
+ GBE_STATSD_INFO(rx_bytes),
+ GBE_STATSD_INFO(tx_good_frames),
+ GBE_STATSD_INFO(tx_broadcast_frames),
+ GBE_STATSD_INFO(tx_multicast_frames),
+ GBE_STATSD_INFO(tx_pause_frames),
+ GBE_STATSD_INFO(tx_deferred_frames),
+ GBE_STATSD_INFO(tx_collision_frames),
+ GBE_STATSD_INFO(tx_single_coll_frames),
+ GBE_STATSD_INFO(tx_mult_coll_frames),
+ GBE_STATSD_INFO(tx_excessive_collisions),
+ GBE_STATSD_INFO(tx_late_collisions),
+ GBE_STATSD_INFO(tx_underrun),
+ GBE_STATSD_INFO(tx_carrier_sense_errors),
+ GBE_STATSD_INFO(tx_bytes),
+ GBE_STATSD_INFO(tx_64byte_frames),
+ GBE_STATSD_INFO(tx_65_to_127byte_frames),
+ GBE_STATSD_INFO(tx_128_to_255byte_frames),
+ GBE_STATSD_INFO(tx_256_to_511byte_frames),
+ GBE_STATSD_INFO(tx_512_to_1023byte_frames),
+ GBE_STATSD_INFO(tx_1024byte_frames),
+ GBE_STATSD_INFO(net_bytes),
+ GBE_STATSD_INFO(rx_sof_overruns),
+ GBE_STATSD_INFO(rx_mof_overruns),
+ GBE_STATSD_INFO(rx_dma_overruns),
+};
+
+/* Number of GBENU_STATS_HOST() entries in gbenu_et_stats */
+#define GBENU_ET_STATS_HOST_SIZE 52
+
+#define GBENU_STATS_HOST(field) \
+{ \
+ "GBE_HOST:"#field, GBENU_STATS0_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+/* Number of GBENU_STATS_Px() entries per slave port in gbenu_et_stats */
+#define GBENU_ET_STATS_PORT_SIZE 65
+
+#define GBENU_STATS_P1(field) \
+{ \
+ "GBE_P1:"#field, GBENU_STATS1_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P2(field) \
+{ \
+ "GBE_P2:"#field, GBENU_STATS2_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P3(field) \
+{ \
+ "GBE_P3:"#field, GBENU_STATS3_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P4(field) \
+{ \
+ "GBE_P4:"#field, GBENU_STATS4_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P5(field) \
+{ \
+ "GBE_P5:"#field, GBENU_STATS5_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P6(field) \
+{ \
+ "GBE_P6:"#field, GBENU_STATS6_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P7(field) \
+{ \
+ "GBE_P7:"#field, GBENU_STATS7_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+#define GBENU_STATS_P8(field) \
+{ \
+ "GBE_P8:"#field, GBENU_STATS8_MODULE, \
+ sizeof_field(struct gbenu_hw_stats, field), \
+ offsetof(struct gbenu_hw_stats, field) \
+}
+
+static const struct netcp_ethtool_stat gbenu_et_stats[] = {
+ /* GBENU Host Module */
+ GBENU_STATS_HOST(rx_good_frames),
+ GBENU_STATS_HOST(rx_broadcast_frames),
+ GBENU_STATS_HOST(rx_multicast_frames),
+ GBENU_STATS_HOST(rx_crc_errors),
+ GBENU_STATS_HOST(rx_oversized_frames),
+ GBENU_STATS_HOST(rx_undersized_frames),
+ GBENU_STATS_HOST(ale_drop),
+ GBENU_STATS_HOST(ale_overrun_drop),
+ GBENU_STATS_HOST(rx_bytes),
+ GBENU_STATS_HOST(tx_good_frames),
+ GBENU_STATS_HOST(tx_broadcast_frames),
+ GBENU_STATS_HOST(tx_multicast_frames),
+ GBENU_STATS_HOST(tx_bytes),
+ GBENU_STATS_HOST(tx_64B_frames),
+ GBENU_STATS_HOST(tx_65_to_127B_frames),
+ GBENU_STATS_HOST(tx_128_to_255B_frames),
+ GBENU_STATS_HOST(tx_256_to_511B_frames),
+ GBENU_STATS_HOST(tx_512_to_1023B_frames),
+ GBENU_STATS_HOST(tx_1024B_frames),
+ GBENU_STATS_HOST(net_bytes),
+ GBENU_STATS_HOST(rx_bottom_fifo_drop),
+ GBENU_STATS_HOST(rx_port_mask_drop),
+ GBENU_STATS_HOST(rx_top_fifo_drop),
+ GBENU_STATS_HOST(ale_rate_limit_drop),
+ GBENU_STATS_HOST(ale_vid_ingress_drop),
+ GBENU_STATS_HOST(ale_da_eq_sa_drop),
+ GBENU_STATS_HOST(ale_unknown_ucast),
+ GBENU_STATS_HOST(ale_unknown_ucast_bytes),
+ GBENU_STATS_HOST(ale_unknown_mcast),
+ GBENU_STATS_HOST(ale_unknown_mcast_bytes),
+ GBENU_STATS_HOST(ale_unknown_bcast),
+ GBENU_STATS_HOST(ale_unknown_bcast_bytes),
+ GBENU_STATS_HOST(ale_pol_match),
+ GBENU_STATS_HOST(ale_pol_match_red),
+ GBENU_STATS_HOST(ale_pol_match_yellow),
+ GBENU_STATS_HOST(tx_mem_protect_err),
+ GBENU_STATS_HOST(tx_pri0_drop),
+ GBENU_STATS_HOST(tx_pri1_drop),
+ GBENU_STATS_HOST(tx_pri2_drop),
+ GBENU_STATS_HOST(tx_pri3_drop),
+ GBENU_STATS_HOST(tx_pri4_drop),
+ GBENU_STATS_HOST(tx_pri5_drop),
+ GBENU_STATS_HOST(tx_pri6_drop),
+ GBENU_STATS_HOST(tx_pri7_drop),
+ GBENU_STATS_HOST(tx_pri0_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri1_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri2_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri3_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri4_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri5_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri6_drop_bcnt),
+ GBENU_STATS_HOST(tx_pri7_drop_bcnt),
+ /* GBENU Module 1 */
+ GBENU_STATS_P1(rx_good_frames),
+ GBENU_STATS_P1(rx_broadcast_frames),
+ GBENU_STATS_P1(rx_multicast_frames),
+ GBENU_STATS_P1(rx_pause_frames),
+ GBENU_STATS_P1(rx_crc_errors),
+ GBENU_STATS_P1(rx_align_code_errors),
+ GBENU_STATS_P1(rx_oversized_frames),
+ GBENU_STATS_P1(rx_jabber_frames),
+ GBENU_STATS_P1(rx_undersized_frames),
+ GBENU_STATS_P1(rx_fragments),
+ GBENU_STATS_P1(ale_drop),
+ GBENU_STATS_P1(ale_overrun_drop),
+ GBENU_STATS_P1(rx_bytes),
+ GBENU_STATS_P1(tx_good_frames),
+ GBENU_STATS_P1(tx_broadcast_frames),
+ GBENU_STATS_P1(tx_multicast_frames),
+ GBENU_STATS_P1(tx_pause_frames),
+ GBENU_STATS_P1(tx_deferred_frames),
+ GBENU_STATS_P1(tx_collision_frames),
+ GBENU_STATS_P1(tx_single_coll_frames),
+ GBENU_STATS_P1(tx_mult_coll_frames),
+ GBENU_STATS_P1(tx_excessive_collisions),
+ GBENU_STATS_P1(tx_late_collisions),
+ GBENU_STATS_P1(rx_ipg_error),
+ GBENU_STATS_P1(tx_carrier_sense_errors),
+ GBENU_STATS_P1(tx_bytes),
+ GBENU_STATS_P1(tx_64B_frames),
+ GBENU_STATS_P1(tx_65_to_127B_frames),
+ GBENU_STATS_P1(tx_128_to_255B_frames),
+ GBENU_STATS_P1(tx_256_to_511B_frames),
+ GBENU_STATS_P1(tx_512_to_1023B_frames),
+ GBENU_STATS_P1(tx_1024B_frames),
+ GBENU_STATS_P1(net_bytes),
+ GBENU_STATS_P1(rx_bottom_fifo_drop),
+ GBENU_STATS_P1(rx_port_mask_drop),
+ GBENU_STATS_P1(rx_top_fifo_drop),
+ GBENU_STATS_P1(ale_rate_limit_drop),
+ GBENU_STATS_P1(ale_vid_ingress_drop),
+ GBENU_STATS_P1(ale_da_eq_sa_drop),
+ GBENU_STATS_P1(ale_unknown_ucast),
+ GBENU_STATS_P1(ale_unknown_ucast_bytes),
+ GBENU_STATS_P1(ale_unknown_mcast),
+ GBENU_STATS_P1(ale_unknown_mcast_bytes),
+ GBENU_STATS_P1(ale_unknown_bcast),
+ GBENU_STATS_P1(ale_unknown_bcast_bytes),
+ GBENU_STATS_P1(ale_pol_match),
+ GBENU_STATS_P1(ale_pol_match_red),
+ GBENU_STATS_P1(ale_pol_match_yellow),
+ GBENU_STATS_P1(tx_mem_protect_err),
+ GBENU_STATS_P1(tx_pri0_drop),
+ GBENU_STATS_P1(tx_pri1_drop),
+ GBENU_STATS_P1(tx_pri2_drop),
+ GBENU_STATS_P1(tx_pri3_drop),
+ GBENU_STATS_P1(tx_pri4_drop),
+ GBENU_STATS_P1(tx_pri5_drop),
+ GBENU_STATS_P1(tx_pri6_drop),
+ GBENU_STATS_P1(tx_pri7_drop),
+ GBENU_STATS_P1(tx_pri0_drop_bcnt),
+ GBENU_STATS_P1(tx_pri1_drop_bcnt),
+ GBENU_STATS_P1(tx_pri2_drop_bcnt),
+ GBENU_STATS_P1(tx_pri3_drop_bcnt),
+ GBENU_STATS_P1(tx_pri4_drop_bcnt),
+ GBENU_STATS_P1(tx_pri5_drop_bcnt),
+ GBENU_STATS_P1(tx_pri6_drop_bcnt),
+ GBENU_STATS_P1(tx_pri7_drop_bcnt),
+ /* GBENU Module 2 */
+ GBENU_STATS_P2(rx_good_frames),
+ GBENU_STATS_P2(rx_broadcast_frames),
+ GBENU_STATS_P2(rx_multicast_frames),
+ GBENU_STATS_P2(rx_pause_frames),
+ GBENU_STATS_P2(rx_crc_errors),
+ GBENU_STATS_P2(rx_align_code_errors),
+ GBENU_STATS_P2(rx_oversized_frames),
+ GBENU_STATS_P2(rx_jabber_frames),
+ GBENU_STATS_P2(rx_undersized_frames),
+ GBENU_STATS_P2(rx_fragments),
+ GBENU_STATS_P2(ale_drop),
+ GBENU_STATS_P2(ale_overrun_drop),
+ GBENU_STATS_P2(rx_bytes),
+ GBENU_STATS_P2(tx_good_frames),
+ GBENU_STATS_P2(tx_broadcast_frames),
+ GBENU_STATS_P2(tx_multicast_frames),
+ GBENU_STATS_P2(tx_pause_frames),
+ GBENU_STATS_P2(tx_deferred_frames),
+ GBENU_STATS_P2(tx_collision_frames),
+ GBENU_STATS_P2(tx_single_coll_frames),
+ GBENU_STATS_P2(tx_mult_coll_frames),
+ GBENU_STATS_P2(tx_excessive_collisions),
+ GBENU_STATS_P2(tx_late_collisions),
+ GBENU_STATS_P2(rx_ipg_error),
+ GBENU_STATS_P2(tx_carrier_sense_errors),
+ GBENU_STATS_P2(tx_bytes),
+ GBENU_STATS_P2(tx_64B_frames),
+ GBENU_STATS_P2(tx_65_to_127B_frames),
+ GBENU_STATS_P2(tx_128_to_255B_frames),
+ GBENU_STATS_P2(tx_256_to_511B_frames),
+ GBENU_STATS_P2(tx_512_to_1023B_frames),
+ GBENU_STATS_P2(tx_1024B_frames),
+ GBENU_STATS_P2(net_bytes),
+ GBENU_STATS_P2(rx_bottom_fifo_drop),
+ GBENU_STATS_P2(rx_port_mask_drop),
+ GBENU_STATS_P2(rx_top_fifo_drop),
+ GBENU_STATS_P2(ale_rate_limit_drop),
+ GBENU_STATS_P2(ale_vid_ingress_drop),
+ GBENU_STATS_P2(ale_da_eq_sa_drop),
+ GBENU_STATS_P2(ale_unknown_ucast),
+ GBENU_STATS_P2(ale_unknown_ucast_bytes),
+ GBENU_STATS_P2(ale_unknown_mcast),
+ GBENU_STATS_P2(ale_unknown_mcast_bytes),
+ GBENU_STATS_P2(ale_unknown_bcast),
+ GBENU_STATS_P2(ale_unknown_bcast_bytes),
+ GBENU_STATS_P2(ale_pol_match),
+ GBENU_STATS_P2(ale_pol_match_red),
+ GBENU_STATS_P2(ale_pol_match_yellow),
+ GBENU_STATS_P2(tx_mem_protect_err),
+ GBENU_STATS_P2(tx_pri0_drop),
+ GBENU_STATS_P2(tx_pri1_drop),
+ GBENU_STATS_P2(tx_pri2_drop),
+ GBENU_STATS_P2(tx_pri3_drop),
+ GBENU_STATS_P2(tx_pri4_drop),
+ GBENU_STATS_P2(tx_pri5_drop),
+ GBENU_STATS_P2(tx_pri6_drop),
+ GBENU_STATS_P2(tx_pri7_drop),
+ GBENU_STATS_P2(tx_pri0_drop_bcnt),
+ GBENU_STATS_P2(tx_pri1_drop_bcnt),
+ GBENU_STATS_P2(tx_pri2_drop_bcnt),
+ GBENU_STATS_P2(tx_pri3_drop_bcnt),
+ GBENU_STATS_P2(tx_pri4_drop_bcnt),
+ GBENU_STATS_P2(tx_pri5_drop_bcnt),
+ GBENU_STATS_P2(tx_pri6_drop_bcnt),
+ GBENU_STATS_P2(tx_pri7_drop_bcnt),
+ /* GBENU Module 3 */
+ GBENU_STATS_P3(rx_good_frames),
+ GBENU_STATS_P3(rx_broadcast_frames),
+ GBENU_STATS_P3(rx_multicast_frames),
+ GBENU_STATS_P3(rx_pause_frames),
+ GBENU_STATS_P3(rx_crc_errors),
+ GBENU_STATS_P3(rx_align_code_errors),
+ GBENU_STATS_P3(rx_oversized_frames),
+ GBENU_STATS_P3(rx_jabber_frames),
+ GBENU_STATS_P3(rx_undersized_frames),
+ GBENU_STATS_P3(rx_fragments),
+ GBENU_STATS_P3(ale_drop),
+ GBENU_STATS_P3(ale_overrun_drop),
+ GBENU_STATS_P3(rx_bytes),
+ GBENU_STATS_P3(tx_good_frames),
+ GBENU_STATS_P3(tx_broadcast_frames),
+ GBENU_STATS_P3(tx_multicast_frames),
+ GBENU_STATS_P3(tx_pause_frames),
+ GBENU_STATS_P3(tx_deferred_frames),
+ GBENU_STATS_P3(tx_collision_frames),
+ GBENU_STATS_P3(tx_single_coll_frames),
+ GBENU_STATS_P3(tx_mult_coll_frames),
+ GBENU_STATS_P3(tx_excessive_collisions),
+ GBENU_STATS_P3(tx_late_collisions),
+ GBENU_STATS_P3(rx_ipg_error),
+ GBENU_STATS_P3(tx_carrier_sense_errors),
+ GBENU_STATS_P3(tx_bytes),
+ GBENU_STATS_P3(tx_64B_frames),
+ GBENU_STATS_P3(tx_65_to_127B_frames),
+ GBENU_STATS_P3(tx_128_to_255B_frames),
+ GBENU_STATS_P3(tx_256_to_511B_frames),
+ GBENU_STATS_P3(tx_512_to_1023B_frames),
+ GBENU_STATS_P3(tx_1024B_frames),
+ GBENU_STATS_P3(net_bytes),
+ GBENU_STATS_P3(rx_bottom_fifo_drop),
+ GBENU_STATS_P3(rx_port_mask_drop),
+ GBENU_STATS_P3(rx_top_fifo_drop),
+ GBENU_STATS_P3(ale_rate_limit_drop),
+ GBENU_STATS_P3(ale_vid_ingress_drop),
+ GBENU_STATS_P3(ale_da_eq_sa_drop),
+ GBENU_STATS_P3(ale_unknown_ucast),
+ GBENU_STATS_P3(ale_unknown_ucast_bytes),
+ GBENU_STATS_P3(ale_unknown_mcast),
+ GBENU_STATS_P3(ale_unknown_mcast_bytes),
+ GBENU_STATS_P3(ale_unknown_bcast),
+ GBENU_STATS_P3(ale_unknown_bcast_bytes),
+ GBENU_STATS_P3(ale_pol_match),
+ GBENU_STATS_P3(ale_pol_match_red),
+ GBENU_STATS_P3(ale_pol_match_yellow),
+ GBENU_STATS_P3(tx_mem_protect_err),
+ GBENU_STATS_P3(tx_pri0_drop),
+ GBENU_STATS_P3(tx_pri1_drop),
+ GBENU_STATS_P3(tx_pri2_drop),
+ GBENU_STATS_P3(tx_pri3_drop),
+ GBENU_STATS_P3(tx_pri4_drop),
+ GBENU_STATS_P3(tx_pri5_drop),
+ GBENU_STATS_P3(tx_pri6_drop),
+ GBENU_STATS_P3(tx_pri7_drop),
+ GBENU_STATS_P3(tx_pri0_drop_bcnt),
+ GBENU_STATS_P3(tx_pri1_drop_bcnt),
+ GBENU_STATS_P3(tx_pri2_drop_bcnt),
+ GBENU_STATS_P3(tx_pri3_drop_bcnt),
+ GBENU_STATS_P3(tx_pri4_drop_bcnt),
+ GBENU_STATS_P3(tx_pri5_drop_bcnt),
+ GBENU_STATS_P3(tx_pri6_drop_bcnt),
+ GBENU_STATS_P3(tx_pri7_drop_bcnt),
+ /* GBENU Module 4 */
+ GBENU_STATS_P4(rx_good_frames),
+ GBENU_STATS_P4(rx_broadcast_frames),
+ GBENU_STATS_P4(rx_multicast_frames),
+ GBENU_STATS_P4(rx_pause_frames),
+ GBENU_STATS_P4(rx_crc_errors),
+ GBENU_STATS_P4(rx_align_code_errors),
+ GBENU_STATS_P4(rx_oversized_frames),
+ GBENU_STATS_P4(rx_jabber_frames),
+ GBENU_STATS_P4(rx_undersized_frames),
+ GBENU_STATS_P4(rx_fragments),
+ GBENU_STATS_P4(ale_drop),
+ GBENU_STATS_P4(ale_overrun_drop),
+ GBENU_STATS_P4(rx_bytes),
+ GBENU_STATS_P4(tx_good_frames),
+ GBENU_STATS_P4(tx_broadcast_frames),
+ GBENU_STATS_P4(tx_multicast_frames),
+ GBENU_STATS_P4(tx_pause_frames),
+ GBENU_STATS_P4(tx_deferred_frames),
+ GBENU_STATS_P4(tx_collision_frames),
+ GBENU_STATS_P4(tx_single_coll_frames),
+ GBENU_STATS_P4(tx_mult_coll_frames),
+ GBENU_STATS_P4(tx_excessive_collisions),
+ GBENU_STATS_P4(tx_late_collisions),
+ GBENU_STATS_P4(rx_ipg_error),
+ GBENU_STATS_P4(tx_carrier_sense_errors),
+ GBENU_STATS_P4(tx_bytes),
+ GBENU_STATS_P4(tx_64B_frames),
+ GBENU_STATS_P4(tx_65_to_127B_frames),
+ GBENU_STATS_P4(tx_128_to_255B_frames),
+ GBENU_STATS_P4(tx_256_to_511B_frames),
+ GBENU_STATS_P4(tx_512_to_1023B_frames),
+ GBENU_STATS_P4(tx_1024B_frames),
+ GBENU_STATS_P4(net_bytes),
+ GBENU_STATS_P4(rx_bottom_fifo_drop),
+ GBENU_STATS_P4(rx_port_mask_drop),
+ GBENU_STATS_P4(rx_top_fifo_drop),
+ GBENU_STATS_P4(ale_rate_limit_drop),
+ GBENU_STATS_P4(ale_vid_ingress_drop),
+ GBENU_STATS_P4(ale_da_eq_sa_drop),
+ GBENU_STATS_P4(ale_unknown_ucast),
+ GBENU_STATS_P4(ale_unknown_ucast_bytes),
+ GBENU_STATS_P4(ale_unknown_mcast),
+ GBENU_STATS_P4(ale_unknown_mcast_bytes),
+ GBENU_STATS_P4(ale_unknown_bcast),
+ GBENU_STATS_P4(ale_unknown_bcast_bytes),
+ GBENU_STATS_P4(ale_pol_match),
+ GBENU_STATS_P4(ale_pol_match_red),
+ GBENU_STATS_P4(ale_pol_match_yellow),
+ GBENU_STATS_P4(tx_mem_protect_err),
+ GBENU_STATS_P4(tx_pri0_drop),
+ GBENU_STATS_P4(tx_pri1_drop),
+ GBENU_STATS_P4(tx_pri2_drop),
+ GBENU_STATS_P4(tx_pri3_drop),
+ GBENU_STATS_P4(tx_pri4_drop),
+ GBENU_STATS_P4(tx_pri5_drop),
+ GBENU_STATS_P4(tx_pri6_drop),
+ GBENU_STATS_P4(tx_pri7_drop),
+ GBENU_STATS_P4(tx_pri0_drop_bcnt),
+ GBENU_STATS_P4(tx_pri1_drop_bcnt),
+ GBENU_STATS_P4(tx_pri2_drop_bcnt),
+ GBENU_STATS_P4(tx_pri3_drop_bcnt),
+ GBENU_STATS_P4(tx_pri4_drop_bcnt),
+ GBENU_STATS_P4(tx_pri5_drop_bcnt),
+ GBENU_STATS_P4(tx_pri6_drop_bcnt),
+ GBENU_STATS_P4(tx_pri7_drop_bcnt),
+ /* GBENU Module 5 */
+ GBENU_STATS_P5(rx_good_frames),
+ GBENU_STATS_P5(rx_broadcast_frames),
+ GBENU_STATS_P5(rx_multicast_frames),
+ GBENU_STATS_P5(rx_pause_frames),
+ GBENU_STATS_P5(rx_crc_errors),
+ GBENU_STATS_P5(rx_align_code_errors),
+ GBENU_STATS_P5(rx_oversized_frames),
+ GBENU_STATS_P5(rx_jabber_frames),
+ GBENU_STATS_P5(rx_undersized_frames),
+ GBENU_STATS_P5(rx_fragments),
+ GBENU_STATS_P5(ale_drop),
+ GBENU_STATS_P5(ale_overrun_drop),
+ GBENU_STATS_P5(rx_bytes),
+ GBENU_STATS_P5(tx_good_frames),
+ GBENU_STATS_P5(tx_broadcast_frames),
+ GBENU_STATS_P5(tx_multicast_frames),
+ GBENU_STATS_P5(tx_pause_frames),
+ GBENU_STATS_P5(tx_deferred_frames),
+ GBENU_STATS_P5(tx_collision_frames),
+ GBENU_STATS_P5(tx_single_coll_frames),
+ GBENU_STATS_P5(tx_mult_coll_frames),
+ GBENU_STATS_P5(tx_excessive_collisions),
+ GBENU_STATS_P5(tx_late_collisions),
+ GBENU_STATS_P5(rx_ipg_error),
+ GBENU_STATS_P5(tx_carrier_sense_errors),
+ GBENU_STATS_P5(tx_bytes),
+ GBENU_STATS_P5(tx_64B_frames),
+ GBENU_STATS_P5(tx_65_to_127B_frames),
+ GBENU_STATS_P5(tx_128_to_255B_frames),
+ GBENU_STATS_P5(tx_256_to_511B_frames),
+ GBENU_STATS_P5(tx_512_to_1023B_frames),
+ GBENU_STATS_P5(tx_1024B_frames),
+ GBENU_STATS_P5(net_bytes),
+ GBENU_STATS_P5(rx_bottom_fifo_drop),
+ GBENU_STATS_P5(rx_port_mask_drop),
+ GBENU_STATS_P5(rx_top_fifo_drop),
+ GBENU_STATS_P5(ale_rate_limit_drop),
+ GBENU_STATS_P5(ale_vid_ingress_drop),
+ GBENU_STATS_P5(ale_da_eq_sa_drop),
+ GBENU_STATS_P5(ale_unknown_ucast),
+ GBENU_STATS_P5(ale_unknown_ucast_bytes),
+ GBENU_STATS_P5(ale_unknown_mcast),
+ GBENU_STATS_P5(ale_unknown_mcast_bytes),
+ GBENU_STATS_P5(ale_unknown_bcast),
+ GBENU_STATS_P5(ale_unknown_bcast_bytes),
+ GBENU_STATS_P5(ale_pol_match),
+ GBENU_STATS_P5(ale_pol_match_red),
+ GBENU_STATS_P5(ale_pol_match_yellow),
+ GBENU_STATS_P5(tx_mem_protect_err),
+ GBENU_STATS_P5(tx_pri0_drop),
+ GBENU_STATS_P5(tx_pri1_drop),
+ GBENU_STATS_P5(tx_pri2_drop),
+ GBENU_STATS_P5(tx_pri3_drop),
+ GBENU_STATS_P5(tx_pri4_drop),
+ GBENU_STATS_P5(tx_pri5_drop),
+ GBENU_STATS_P5(tx_pri6_drop),
+ GBENU_STATS_P5(tx_pri7_drop),
+ GBENU_STATS_P5(tx_pri0_drop_bcnt),
+ GBENU_STATS_P5(tx_pri1_drop_bcnt),
+ GBENU_STATS_P5(tx_pri2_drop_bcnt),
+ GBENU_STATS_P5(tx_pri3_drop_bcnt),
+ GBENU_STATS_P5(tx_pri4_drop_bcnt),
+ GBENU_STATS_P5(tx_pri5_drop_bcnt),
+ GBENU_STATS_P5(tx_pri6_drop_bcnt),
+ GBENU_STATS_P5(tx_pri7_drop_bcnt),
+ /* GBENU Module 6 */
+ GBENU_STATS_P6(rx_good_frames),
+ GBENU_STATS_P6(rx_broadcast_frames),
+ GBENU_STATS_P6(rx_multicast_frames),
+ GBENU_STATS_P6(rx_pause_frames),
+ GBENU_STATS_P6(rx_crc_errors),
+ GBENU_STATS_P6(rx_align_code_errors),
+ GBENU_STATS_P6(rx_oversized_frames),
+ GBENU_STATS_P6(rx_jabber_frames),
+ GBENU_STATS_P6(rx_undersized_frames),
+ GBENU_STATS_P6(rx_fragments),
+ GBENU_STATS_P6(ale_drop),
+ GBENU_STATS_P6(ale_overrun_drop),
+ GBENU_STATS_P6(rx_bytes),
+ GBENU_STATS_P6(tx_good_frames),
+ GBENU_STATS_P6(tx_broadcast_frames),
+ GBENU_STATS_P6(tx_multicast_frames),
+ GBENU_STATS_P6(tx_pause_frames),
+ GBENU_STATS_P6(tx_deferred_frames),
+ GBENU_STATS_P6(tx_collision_frames),
+ GBENU_STATS_P6(tx_single_coll_frames),
+ GBENU_STATS_P6(tx_mult_coll_frames),
+ GBENU_STATS_P6(tx_excessive_collisions),
+ GBENU_STATS_P6(tx_late_collisions),
+ GBENU_STATS_P6(rx_ipg_error),
+ GBENU_STATS_P6(tx_carrier_sense_errors),
+ GBENU_STATS_P6(tx_bytes),
+ GBENU_STATS_P6(tx_64B_frames),
+ GBENU_STATS_P6(tx_65_to_127B_frames),
+ GBENU_STATS_P6(tx_128_to_255B_frames),
+ GBENU_STATS_P6(tx_256_to_511B_frames),
+ GBENU_STATS_P6(tx_512_to_1023B_frames),
+ GBENU_STATS_P6(tx_1024B_frames),
+ GBENU_STATS_P6(net_bytes),
+ GBENU_STATS_P6(rx_bottom_fifo_drop),
+ GBENU_STATS_P6(rx_port_mask_drop),
+ GBENU_STATS_P6(rx_top_fifo_drop),
+ GBENU_STATS_P6(ale_rate_limit_drop),
+ GBENU_STATS_P6(ale_vid_ingress_drop),
+ GBENU_STATS_P6(ale_da_eq_sa_drop),
+ GBENU_STATS_P6(ale_unknown_ucast),
+ GBENU_STATS_P6(ale_unknown_ucast_bytes),
+ GBENU_STATS_P6(ale_unknown_mcast),
+ GBENU_STATS_P6(ale_unknown_mcast_bytes),
+ GBENU_STATS_P6(ale_unknown_bcast),
+ GBENU_STATS_P6(ale_unknown_bcast_bytes),
+ GBENU_STATS_P6(ale_pol_match),
+ GBENU_STATS_P6(ale_pol_match_red),
+ GBENU_STATS_P6(ale_pol_match_yellow),
+ GBENU_STATS_P6(tx_mem_protect_err),
+ GBENU_STATS_P6(tx_pri0_drop),
+ GBENU_STATS_P6(tx_pri1_drop),
+ GBENU_STATS_P6(tx_pri2_drop),
+ GBENU_STATS_P6(tx_pri3_drop),
+ GBENU_STATS_P6(tx_pri4_drop),
+ GBENU_STATS_P6(tx_pri5_drop),
+ GBENU_STATS_P6(tx_pri6_drop),
+ GBENU_STATS_P6(tx_pri7_drop),
+ GBENU_STATS_P6(tx_pri0_drop_bcnt),
+ GBENU_STATS_P6(tx_pri1_drop_bcnt),
+ GBENU_STATS_P6(tx_pri2_drop_bcnt),
+ GBENU_STATS_P6(tx_pri3_drop_bcnt),
+ GBENU_STATS_P6(tx_pri4_drop_bcnt),
+ GBENU_STATS_P6(tx_pri5_drop_bcnt),
+ GBENU_STATS_P6(tx_pri6_drop_bcnt),
+ GBENU_STATS_P6(tx_pri7_drop_bcnt),
+ /* GBENU Module 7 */
+ GBENU_STATS_P7(rx_good_frames),
+ GBENU_STATS_P7(rx_broadcast_frames),
+ GBENU_STATS_P7(rx_multicast_frames),
+ GBENU_STATS_P7(rx_pause_frames),
+ GBENU_STATS_P7(rx_crc_errors),
+ GBENU_STATS_P7(rx_align_code_errors),
+ GBENU_STATS_P7(rx_oversized_frames),
+ GBENU_STATS_P7(rx_jabber_frames),
+ GBENU_STATS_P7(rx_undersized_frames),
+ GBENU_STATS_P7(rx_fragments),
+ GBENU_STATS_P7(ale_drop),
+ GBENU_STATS_P7(ale_overrun_drop),
+ GBENU_STATS_P7(rx_bytes),
+ GBENU_STATS_P7(tx_good_frames),
+ GBENU_STATS_P7(tx_broadcast_frames),
+ GBENU_STATS_P7(tx_multicast_frames),
+ GBENU_STATS_P7(tx_pause_frames),
+ GBENU_STATS_P7(tx_deferred_frames),
+ GBENU_STATS_P7(tx_collision_frames),
+ GBENU_STATS_P7(tx_single_coll_frames),
+ GBENU_STATS_P7(tx_mult_coll_frames),
+ GBENU_STATS_P7(tx_excessive_collisions),
+ GBENU_STATS_P7(tx_late_collisions),
+ GBENU_STATS_P7(rx_ipg_error),
+ GBENU_STATS_P7(tx_carrier_sense_errors),
+ GBENU_STATS_P7(tx_bytes),
+ GBENU_STATS_P7(tx_64B_frames),
+ GBENU_STATS_P7(tx_65_to_127B_frames),
+ GBENU_STATS_P7(tx_128_to_255B_frames),
+ GBENU_STATS_P7(tx_256_to_511B_frames),
+ GBENU_STATS_P7(tx_512_to_1023B_frames),
+ GBENU_STATS_P7(tx_1024B_frames),
+ GBENU_STATS_P7(net_bytes),
+ GBENU_STATS_P7(rx_bottom_fifo_drop),
+ GBENU_STATS_P7(rx_port_mask_drop),
+ GBENU_STATS_P7(rx_top_fifo_drop),
+ GBENU_STATS_P7(ale_rate_limit_drop),
+ GBENU_STATS_P7(ale_vid_ingress_drop),
+ GBENU_STATS_P7(ale_da_eq_sa_drop),
+ GBENU_STATS_P7(ale_unknown_ucast),
+ GBENU_STATS_P7(ale_unknown_ucast_bytes),
+ GBENU_STATS_P7(ale_unknown_mcast),
+ GBENU_STATS_P7(ale_unknown_mcast_bytes),
+ GBENU_STATS_P7(ale_unknown_bcast),
+ GBENU_STATS_P7(ale_unknown_bcast_bytes),
+ GBENU_STATS_P7(ale_pol_match),
+ GBENU_STATS_P7(ale_pol_match_red),
+ GBENU_STATS_P7(ale_pol_match_yellow),
+ GBENU_STATS_P7(tx_mem_protect_err),
+ GBENU_STATS_P7(tx_pri0_drop),
+ GBENU_STATS_P7(tx_pri1_drop),
+ GBENU_STATS_P7(tx_pri2_drop),
+ GBENU_STATS_P7(tx_pri3_drop),
+ GBENU_STATS_P7(tx_pri4_drop),
+ GBENU_STATS_P7(tx_pri5_drop),
+ GBENU_STATS_P7(tx_pri6_drop),
+ GBENU_STATS_P7(tx_pri7_drop),
+ GBENU_STATS_P7(tx_pri0_drop_bcnt),
+ GBENU_STATS_P7(tx_pri1_drop_bcnt),
+ GBENU_STATS_P7(tx_pri2_drop_bcnt),
+ GBENU_STATS_P7(tx_pri3_drop_bcnt),
+ GBENU_STATS_P7(tx_pri4_drop_bcnt),
+ GBENU_STATS_P7(tx_pri5_drop_bcnt),
+ GBENU_STATS_P7(tx_pri6_drop_bcnt),
+ GBENU_STATS_P7(tx_pri7_drop_bcnt),
+ /* GBENU Module 8 */
+ GBENU_STATS_P8(rx_good_frames),
+ GBENU_STATS_P8(rx_broadcast_frames),
+ GBENU_STATS_P8(rx_multicast_frames),
+ GBENU_STATS_P8(rx_pause_frames),
+ GBENU_STATS_P8(rx_crc_errors),
+ GBENU_STATS_P8(rx_align_code_errors),
+ GBENU_STATS_P8(rx_oversized_frames),
+ GBENU_STATS_P8(rx_jabber_frames),
+ GBENU_STATS_P8(rx_undersized_frames),
+ GBENU_STATS_P8(rx_fragments),
+ GBENU_STATS_P8(ale_drop),
+ GBENU_STATS_P8(ale_overrun_drop),
+ GBENU_STATS_P8(rx_bytes),
+ GBENU_STATS_P8(tx_good_frames),
+ GBENU_STATS_P8(tx_broadcast_frames),
+ GBENU_STATS_P8(tx_multicast_frames),
+ GBENU_STATS_P8(tx_pause_frames),
+ GBENU_STATS_P8(tx_deferred_frames),
+ GBENU_STATS_P8(tx_collision_frames),
+ GBENU_STATS_P8(tx_single_coll_frames),
+ GBENU_STATS_P8(tx_mult_coll_frames),
+ GBENU_STATS_P8(tx_excessive_collisions),
+ GBENU_STATS_P8(tx_late_collisions),
+ GBENU_STATS_P8(rx_ipg_error),
+ GBENU_STATS_P8(tx_carrier_sense_errors),
+ GBENU_STATS_P8(tx_bytes),
+ GBENU_STATS_P8(tx_64B_frames),
+ GBENU_STATS_P8(tx_65_to_127B_frames),
+ GBENU_STATS_P8(tx_128_to_255B_frames),
+ GBENU_STATS_P8(tx_256_to_511B_frames),
+ GBENU_STATS_P8(tx_512_to_1023B_frames),
+ GBENU_STATS_P8(tx_1024B_frames),
+ GBENU_STATS_P8(net_bytes),
+ GBENU_STATS_P8(rx_bottom_fifo_drop),
+ GBENU_STATS_P8(rx_port_mask_drop),
+ GBENU_STATS_P8(rx_top_fifo_drop),
+ GBENU_STATS_P8(ale_rate_limit_drop),
+ GBENU_STATS_P8(ale_vid_ingress_drop),
+ GBENU_STATS_P8(ale_da_eq_sa_drop),
+ GBENU_STATS_P8(ale_unknown_ucast),
+ GBENU_STATS_P8(ale_unknown_ucast_bytes),
+ GBENU_STATS_P8(ale_unknown_mcast),
+ GBENU_STATS_P8(ale_unknown_mcast_bytes),
+ GBENU_STATS_P8(ale_unknown_bcast),
+ GBENU_STATS_P8(ale_unknown_bcast_bytes),
+ GBENU_STATS_P8(ale_pol_match),
+ GBENU_STATS_P8(ale_pol_match_red),
+ GBENU_STATS_P8(ale_pol_match_yellow),
+ GBENU_STATS_P8(tx_mem_protect_err),
+ GBENU_STATS_P8(tx_pri0_drop),
+ GBENU_STATS_P8(tx_pri1_drop),
+ GBENU_STATS_P8(tx_pri2_drop),
+ GBENU_STATS_P8(tx_pri3_drop),
+ GBENU_STATS_P8(tx_pri4_drop),
+ GBENU_STATS_P8(tx_pri5_drop),
+ GBENU_STATS_P8(tx_pri6_drop),
+ GBENU_STATS_P8(tx_pri7_drop),
+ GBENU_STATS_P8(tx_pri0_drop_bcnt),
+ GBENU_STATS_P8(tx_pri1_drop_bcnt),
+ GBENU_STATS_P8(tx_pri2_drop_bcnt),
+ GBENU_STATS_P8(tx_pri3_drop_bcnt),
+ GBENU_STATS_P8(tx_pri4_drop_bcnt),
+ GBENU_STATS_P8(tx_pri5_drop_bcnt),
+ GBENU_STATS_P8(tx_pri6_drop_bcnt),
+ GBENU_STATS_P8(tx_pri7_drop_bcnt),
+};
+
+#define XGBE_STATS0_INFO(field) \
+{ \
+ "GBE_0:"#field, XGBE_STATS0_MODULE, \
+ sizeof_field(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field) \
+}
+
+#define XGBE_STATS1_INFO(field) \
+{ \
+ "GBE_1:"#field, XGBE_STATS1_MODULE, \
+ sizeof_field(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field) \
+}
+
+#define XGBE_STATS2_INFO(field) \
+{ \
+ "GBE_2:"#field, XGBE_STATS2_MODULE, \
+ sizeof_field(struct xgbe_hw_stats, field), \
+ offsetof(struct xgbe_hw_stats, field) \
+}
+
+static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
+ /* GBE module 0 */
+ XGBE_STATS0_INFO(rx_good_frames),
+ XGBE_STATS0_INFO(rx_broadcast_frames),
+ XGBE_STATS0_INFO(rx_multicast_frames),
+ XGBE_STATS0_INFO(rx_oversized_frames),
+ XGBE_STATS0_INFO(rx_undersized_frames),
+ XGBE_STATS0_INFO(overrun_type4),
+ XGBE_STATS0_INFO(overrun_type5),
+ XGBE_STATS0_INFO(rx_bytes),
+ XGBE_STATS0_INFO(tx_good_frames),
+ XGBE_STATS0_INFO(tx_broadcast_frames),
+ XGBE_STATS0_INFO(tx_multicast_frames),
+ XGBE_STATS0_INFO(tx_bytes),
+ XGBE_STATS0_INFO(tx_64byte_frames),
+ XGBE_STATS0_INFO(tx_65_to_127byte_frames),
+ XGBE_STATS0_INFO(tx_128_to_255byte_frames),
+ XGBE_STATS0_INFO(tx_256_to_511byte_frames),
+ XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
+ XGBE_STATS0_INFO(tx_1024byte_frames),
+ XGBE_STATS0_INFO(net_bytes),
+ XGBE_STATS0_INFO(rx_sof_overruns),
+ XGBE_STATS0_INFO(rx_mof_overruns),
+ XGBE_STATS0_INFO(rx_dma_overruns),
+ /* XGBE module 1 */
+ XGBE_STATS1_INFO(rx_good_frames),
+ XGBE_STATS1_INFO(rx_broadcast_frames),
+ XGBE_STATS1_INFO(rx_multicast_frames),
+ XGBE_STATS1_INFO(rx_pause_frames),
+ XGBE_STATS1_INFO(rx_crc_errors),
+ XGBE_STATS1_INFO(rx_align_code_errors),
+ XGBE_STATS1_INFO(rx_oversized_frames),
+ XGBE_STATS1_INFO(rx_jabber_frames),
+ XGBE_STATS1_INFO(rx_undersized_frames),
+ XGBE_STATS1_INFO(rx_fragments),
+ XGBE_STATS1_INFO(overrun_type4),
+ XGBE_STATS1_INFO(overrun_type5),
+ XGBE_STATS1_INFO(rx_bytes),
+ XGBE_STATS1_INFO(tx_good_frames),
+ XGBE_STATS1_INFO(tx_broadcast_frames),
+ XGBE_STATS1_INFO(tx_multicast_frames),
+ XGBE_STATS1_INFO(tx_pause_frames),
+ XGBE_STATS1_INFO(tx_deferred_frames),
+ XGBE_STATS1_INFO(tx_collision_frames),
+ XGBE_STATS1_INFO(tx_single_coll_frames),
+ XGBE_STATS1_INFO(tx_mult_coll_frames),
+ XGBE_STATS1_INFO(tx_excessive_collisions),
+ XGBE_STATS1_INFO(tx_late_collisions),
+ XGBE_STATS1_INFO(tx_underrun),
+ XGBE_STATS1_INFO(tx_carrier_sense_errors),
+ XGBE_STATS1_INFO(tx_bytes),
+ XGBE_STATS1_INFO(tx_64byte_frames),
+ XGBE_STATS1_INFO(tx_65_to_127byte_frames),
+ XGBE_STATS1_INFO(tx_128_to_255byte_frames),
+ XGBE_STATS1_INFO(tx_256_to_511byte_frames),
+ XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
+ XGBE_STATS1_INFO(tx_1024byte_frames),
+ XGBE_STATS1_INFO(net_bytes),
+ XGBE_STATS1_INFO(rx_sof_overruns),
+ XGBE_STATS1_INFO(rx_mof_overruns),
+ XGBE_STATS1_INFO(rx_dma_overruns),
+ /* XGBE module 2 */
+ XGBE_STATS2_INFO(rx_good_frames),
+ XGBE_STATS2_INFO(rx_broadcast_frames),
+ XGBE_STATS2_INFO(rx_multicast_frames),
+ XGBE_STATS2_INFO(rx_pause_frames),
+ XGBE_STATS2_INFO(rx_crc_errors),
+ XGBE_STATS2_INFO(rx_align_code_errors),
+ XGBE_STATS2_INFO(rx_oversized_frames),
+ XGBE_STATS2_INFO(rx_jabber_frames),
+ XGBE_STATS2_INFO(rx_undersized_frames),
+ XGBE_STATS2_INFO(rx_fragments),
+ XGBE_STATS2_INFO(overrun_type4),
+ XGBE_STATS2_INFO(overrun_type5),
+ XGBE_STATS2_INFO(rx_bytes),
+ XGBE_STATS2_INFO(tx_good_frames),
+ XGBE_STATS2_INFO(tx_broadcast_frames),
+ XGBE_STATS2_INFO(tx_multicast_frames),
+ XGBE_STATS2_INFO(tx_pause_frames),
+ XGBE_STATS2_INFO(tx_deferred_frames),
+ XGBE_STATS2_INFO(tx_collision_frames),
+ XGBE_STATS2_INFO(tx_single_coll_frames),
+ XGBE_STATS2_INFO(tx_mult_coll_frames),
+ XGBE_STATS2_INFO(tx_excessive_collisions),
+ XGBE_STATS2_INFO(tx_late_collisions),
+ XGBE_STATS2_INFO(tx_underrun),
+ XGBE_STATS2_INFO(tx_carrier_sense_errors),
+ XGBE_STATS2_INFO(tx_bytes),
+ XGBE_STATS2_INFO(tx_64byte_frames),
+ XGBE_STATS2_INFO(tx_65_to_127byte_frames),
+ XGBE_STATS2_INFO(tx_128_to_255byte_frames),
+ XGBE_STATS2_INFO(tx_256_to_511byte_frames),
+ XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
+ XGBE_STATS2_INFO(tx_1024byte_frames),
+ XGBE_STATS2_INFO(net_bytes),
+ XGBE_STATS2_INFO(rx_sof_overruns),
+ XGBE_STATS2_INFO(rx_mof_overruns),
+ XGBE_STATS2_INFO(rx_dma_overruns),
+};
+
+#define for_each_intf(i, priv) \
+ list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
+
+#define for_each_sec_slave(slave, priv) \
+ list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
+
+#define first_sec_slave(priv) \
+ list_first_entry(&priv->secondary_slaves, \
+ struct gbe_slave, slave_list)
+
+static void keystone_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *info)
+{
+ strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
+}
+
+static u32 keystone_get_msglevel(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+
+ return netcp->msg_enable;
+}
+
+static void keystone_set_msglevel(struct net_device *ndev, u32 value)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+
+ netcp->msg_enable = value;
+}
+
+static struct gbe_intf *keystone_get_intf_data(struct netcp_intf *netcp)
+{
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+
+ return gbe_intf;
+}
+
+static void keystone_get_stat_strings(struct net_device *ndev,
+ uint32_t stringset, uint8_t *data)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+ struct gbe_priv *gbe_dev;
+ int i;
+
+ gbe_intf = keystone_get_intf_data(netcp);
+ if (!gbe_intf)
+ return;
+ gbe_dev = gbe_intf->gbe_dev;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ memcpy(data, gbe_dev->et_stats[i].desc,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ case ETH_SS_TEST:
+ break;
+ }
+}
+
+static int keystone_get_sset_count(struct net_device *ndev, int stringset)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+ struct gbe_priv *gbe_dev;
+
+ gbe_intf = keystone_get_intf_data(netcp);
+ if (!gbe_intf)
+ return -EINVAL;
+ gbe_dev = gbe_intf->gbe_dev;
+
+ switch (stringset) {
+ case ETH_SS_TEST:
+ return 0;
+ case ETH_SS_STATS:
+ return gbe_dev->num_et_stats;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void gbe_reset_mod_stats(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ void __iomem *base = gbe_dev->hw_stats_regs[stats_mod];
+ u32 __iomem *p_stats_entry;
+ int i;
+
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ if (gbe_dev->et_stats[i].type == stats_mod) {
+ p_stats_entry = base + gbe_dev->et_stats[i].offset;
+ gbe_dev->hw_stats[i] = 0;
+ gbe_dev->hw_stats_prev[i] = readl(p_stats_entry);
+ }
+ }
+}
+
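+/* Accumulate one hardware statistics counter into its 64-bit software
+ * shadow. The hardware counters (and hw_stats_prev) are 32 bits wide,
+ * so the unsigned delta against the previously read value also copes
+ * with counter wrap-around.
+ */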
+static inline void gbe_update_hw_stats_entry(struct gbe_priv *gbe_dev,
+ int et_stats_entry)
+{
+ void __iomem *base = NULL;
+ u32 __iomem *p_stats_entry;
+ u32 curr, delta;
+
+	/* The hw_stats_regs pointers have already been set up to point
+	 * at the correct stats module base for this entry.
+	 */
+ base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[et_stats_entry].type];
+ p_stats_entry = base + gbe_dev->et_stats[et_stats_entry].offset;
+ curr = readl(p_stats_entry);
+ delta = curr - gbe_dev->hw_stats_prev[et_stats_entry];
+ gbe_dev->hw_stats_prev[et_stats_entry] = curr;
+ gbe_dev->hw_stats[et_stats_entry] += delta;
+}
+
+static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ int i;
+
+ for (i = 0; i < gbe_dev->num_et_stats; i++) {
+ gbe_update_hw_stats_entry(gbe_dev, i);
+
+ if (data)
+ data[i] = gbe_dev->hw_stats[i];
+ }
+}
+
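+/* On version 1.4 hardware only two of the four statistics modules are
+ * mapped into the stats register space at a time; the GBE_STATS_CD_SEL
+ * bit in stat_port_en selects whether the A/B or the C/D pair is
+ * currently visible.
+ */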
+static inline void gbe_stats_mod_visible_ver14(struct gbe_priv *gbe_dev,
+ int stats_mod)
+{
+ u32 val;
+
+ val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+
+ switch (stats_mod) {
+ case GBE_STATSA_MODULE:
+ case GBE_STATSB_MODULE:
+ val &= ~GBE_STATS_CD_SEL;
+ break;
+ case GBE_STATSC_MODULE:
+ case GBE_STATSD_MODULE:
+ val |= GBE_STATS_CD_SEL;
+ break;
+ default:
+ return;
+ }
+
+ /* make the stat module visible */
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
+}
+
+static void gbe_reset_mod_stats_ver14(struct gbe_priv *gbe_dev, int stats_mod)
+{
+ gbe_stats_mod_visible_ver14(gbe_dev, stats_mod);
+ gbe_reset_mod_stats(gbe_dev, stats_mod);
+}
+
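+/* Version 1.4 statistics are read in two passes: the first half of the
+ * ethtool stats table with the A/B module pair visible, the second half
+ * with the C/D pair visible (see gbe_stats_mod_visible_ver14()).
+ */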
+static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
+{
+ u32 half_num_et_stats = (gbe_dev->num_et_stats / 2);
+ int et_entry, j, pair;
+
+ for (pair = 0; pair < 2; pair++) {
+ gbe_stats_mod_visible_ver14(gbe_dev, (pair ?
+ GBE_STATSC_MODULE :
+ GBE_STATSA_MODULE));
+
+ for (j = 0; j < half_num_et_stats; j++) {
+ et_entry = pair * half_num_et_stats + j;
+ gbe_update_hw_stats_entry(gbe_dev, et_entry);
+
+ if (data)
+ data[et_entry] = gbe_dev->hw_stats[et_entry];
+ }
+ }
+}
+
+static void keystone_get_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats,
+ uint64_t *data)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+ struct gbe_priv *gbe_dev;
+
+ gbe_intf = keystone_get_intf_data(netcp);
+ if (!gbe_intf)
+ return;
+
+ gbe_dev = gbe_intf->gbe_dev;
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+ if (IS_SS_ID_VER_14(gbe_dev))
+ gbe_update_stats_ver14(gbe_dev, data);
+ else
+ gbe_update_stats(gbe_dev, data);
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+}
+
+static int keystone_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct phy_device *phy = ndev->phydev;
+ struct gbe_intf *gbe_intf;
+
+ if (!phy)
+ return -EINVAL;
+
+ gbe_intf = keystone_get_intf_data(netcp);
+ if (!gbe_intf)
+ return -EINVAL;
+
+ if (!gbe_intf->slave)
+ return -EINVAL;
+
+ phy_ethtool_ksettings_get(phy, cmd);
+ cmd->base.port = gbe_intf->slave->phy_port_t;
+
+ return 0;
+}
+
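+/* A port type change is accepted only when the PHY both supports and
+ * advertises the requested medium; the remaining settings are handed
+ * off to phy_ethtool_ksettings_set().
+ */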
+static int keystone_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct phy_device *phy = ndev->phydev;
+ struct gbe_intf *gbe_intf;
+ u8 port = cmd->base.port;
+ u32 advertising, supported;
+ u32 features;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ features = advertising & supported;
+
+ if (!phy)
+ return -EINVAL;
+
+ gbe_intf = keystone_get_intf_data(netcp);
+ if (!gbe_intf)
+ return -EINVAL;
+
+ if (!gbe_intf->slave)
+ return -EINVAL;
+
+ if (port != gbe_intf->slave->phy_port_t) {
+ if ((port == PORT_TP) && !(features & ADVERTISED_TP))
+ return -EINVAL;
+
+ if ((port == PORT_AUI) && !(features & ADVERTISED_AUI))
+ return -EINVAL;
+
+ if ((port == PORT_BNC) && !(features & ADVERTISED_BNC))
+ return -EINVAL;
+
+ if ((port == PORT_MII) && !(features & ADVERTISED_MII))
+ return -EINVAL;
+
+ if ((port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
+ return -EINVAL;
+ }
+
+ gbe_intf->slave->phy_port_t = port;
+ return phy_ethtool_ksettings_set(phy, cmd);
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static int keystone_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf || !gbe_intf->gbe_dev->cpts)
+ return -EINVAL;
+
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_HARDWARE |
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_HARDWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE |
+ SOF_TIMESTAMPING_RAW_HARDWARE;
+ info->phc_index = gbe_intf->gbe_dev->cpts->phc_index;
+ info->tx_types =
+ (1 << HWTSTAMP_TX_OFF) |
+ (1 << HWTSTAMP_TX_ON);
+ info->rx_filters =
+ (1 << HWTSTAMP_FILTER_NONE) |
+ (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+ (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
+ return 0;
+}
+#else
+static int keystone_get_ts_info(struct net_device *ndev,
+ struct ethtool_ts_info *info)
+{
+ info->so_timestamping =
+ SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+ info->tx_types = 0;
+ info->rx_filters = 0;
+ return 0;
+}
+#endif /* CONFIG_TI_CPTS */
+
+static const struct ethtool_ops keystone_ethtool_ops = {
+ .get_drvinfo = keystone_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = keystone_get_msglevel,
+ .set_msglevel = keystone_set_msglevel,
+ .get_strings = keystone_get_stat_strings,
+ .get_sset_count = keystone_get_sset_count,
+ .get_ethtool_stats = keystone_get_ethtool_stats,
+ .get_link_ksettings = keystone_get_link_ksettings,
+ .set_link_ksettings = keystone_set_link_ksettings,
+ .get_ts_info = keystone_get_ts_info,
+};
+
+static void gbe_set_slave_mac(struct gbe_slave *slave,
+ struct gbe_intf *gbe_intf)
+{
+ struct net_device *ndev = gbe_intf->ndev;
+
+ writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
+ writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
+}
+
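+/* Map a slave index to its switch port number: when the host uses
+ * port 0, the slave ports are numbered from 1 upwards.
+ */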
+static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
+{
+ if (priv->host_port == 0)
+ return slave_num + 1;
+
+ return slave_num;
+}
+
+static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
+ struct net_device *ndev,
+ struct gbe_slave *slave,
+ int up)
+{
+ struct phy_device *phy = slave->phy;
+ u32 mac_control = 0;
+
+ if (up) {
+ mac_control = slave->mac_control;
+ if (phy && (phy->speed == SPEED_1000)) {
+ mac_control |= MACSL_GIG_MODE;
+ mac_control &= ~MACSL_XGIG_MODE;
+ } else if (phy && (phy->speed == SPEED_10000)) {
+ mac_control |= MACSL_XGIG_MODE;
+ mac_control &= ~MACSL_GIG_MODE;
+ }
+
+ writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+ mac_control));
+
+ cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+ ALE_PORT_STATE,
+ ALE_PORT_STATE_FORWARD);
+
+ if (ndev && slave->open &&
+ ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != XGMII_LINK_MAC_PHY)))
+ netif_carrier_on(ndev);
+ } else {
+ writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
+ mac_control));
+ cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+ ALE_PORT_STATE,
+ ALE_PORT_STATE_DISABLE);
+ if (ndev &&
+ ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != XGMII_LINK_MAC_PHY)))
+ netif_carrier_off(ndev);
+ }
+
+ if (phy)
+ phy_print_status(phy);
+}
+
+static bool gbe_phy_link_status(struct gbe_slave *slave)
+{
+ return !slave->phy || slave->phy->link;
+}
+
+#define RGMII_REG_STATUS_LINK BIT(0)
+
+static void netcp_2u_rgmii_get_port_link(struct gbe_priv *gbe_dev, bool *status)
+{
+ u32 val = 0;
+
+ val = readl(GBE_REG_ADDR(gbe_dev, ss_regs, rgmii_status));
+ *status = !!(val & RGMII_REG_STATUS_LINK);
+}
+
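+/* The effective link state is the combination of the switch-level
+ * (SGMII/RGMII) link status and the PHY link status; the up/down
+ * action is taken only when that combined state actually changes.
+ */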
+static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
+ struct gbe_slave *slave,
+ struct net_device *ndev)
+{
+ bool sw_link_state = true, phy_link_state;
+ int sp = slave->slave_num, link_state;
+
+ if (!slave->open)
+ return;
+
+ if (SLAVE_LINK_IS_RGMII(slave))
+ netcp_2u_rgmii_get_port_link(gbe_dev,
+ &sw_link_state);
+ if (SLAVE_LINK_IS_SGMII(slave))
+ sw_link_state =
+ netcp_sgmii_get_port_link(SGMII_BASE(gbe_dev, sp), sp);
+
+ phy_link_state = gbe_phy_link_status(slave);
+ link_state = phy_link_state & sw_link_state;
+
+ if (atomic_xchg(&slave->link_state, link_state) != link_state)
+ netcp_ethss_link_state_action(gbe_dev, ndev, slave,
+ link_state);
+}
+
+static void xgbe_adjust_link(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
+ if (!gbe_intf)
+ return;
+
+ netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+ ndev);
+}
+
+static void gbe_adjust_link(struct net_device *ndev)
+{
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_intf *gbe_intf;
+
+ gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
+ if (!gbe_intf)
+ return;
+
+ netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
+ ndev);
+}
+
+static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
+{
+ struct gbe_priv *gbe_dev = netdev_priv(ndev);
+ struct gbe_slave *slave;
+
+ for_each_sec_slave(slave, gbe_dev)
+ netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+}
+
+/* Reset EMAC
+ * Soft reset is set and polled until clear, or until a timeout occurs
+ */
+static int gbe_port_reset(struct gbe_slave *slave)
+{
+ u32 i, v;
+
+ /* Set the soft reset bit */
+ writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
+
+ /* Wait for the bit to clear */
+ for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
+ v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
+ if ((v & SOFT_RESET_MASK) != SOFT_RESET)
+ return 0;
+ }
+
+ /* Timeout on the reset */
+ return GMACSL_RET_WARN_RESET_INCOMPLETE;
+}
+
+/* Configure EMAC */
+static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+ int max_rx_len)
+{
+ void __iomem *rx_maxlen_reg;
+ u32 xgmii_mode;
+
+ if (max_rx_len > NETCP_MAX_FRAME_SIZE)
+ max_rx_len = NETCP_MAX_FRAME_SIZE;
+
+ /* Enable correct MII mode at SS level */
+ if (IS_SS_ID_XGBE(gbe_dev) &&
+ (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
+ xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
+ xgmii_mode |= (1 << slave->slave_num);
+ writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
+ }
+
+ if (IS_SS_ID_MU(gbe_dev))
+ rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
+ else
+ rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
+
+ writel(max_rx_len, rx_maxlen_reg);
+ writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
+}
+
+static void gbe_sgmii_rtreset(struct gbe_priv *priv,
+ struct gbe_slave *slave, bool set)
+{
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
+
+ netcp_sgmii_rtreset(SGMII_BASE(priv, slave->slave_num),
+ slave->slave_num, set);
+}
+
+static void gbe_slave_stop(struct gbe_intf *intf)
+{
+ struct gbe_priv *gbe_dev = intf->gbe_dev;
+ struct gbe_slave *slave = intf->slave;
+
+ if (!IS_SS_ID_2U(gbe_dev))
+ gbe_sgmii_rtreset(gbe_dev, slave, true);
+ gbe_port_reset(slave);
+ /* Disable forwarding */
+ cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
+ cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
+ 1 << slave->port_num, 0, 0);
+
+ if (!slave->phy)
+ return;
+
+ phy_stop(slave->phy);
+ phy_disconnect(slave->phy);
+ slave->phy = NULL;
+}
+
+static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
+{
+ if (SLAVE_LINK_IS_XGMII(slave))
+ return;
+
+ netcp_sgmii_reset(SGMII_BASE(priv, slave->slave_num), slave->slave_num);
+ netcp_sgmii_config(SGMII_BASE(priv, slave->slave_num), slave->slave_num,
+ slave->link_interface);
+}
+
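+/* Bring up one slave port: (re)configure SGMII where applicable, reset
+ * and configure the MAC, program the port MAC address, enable ALE
+ * forwarding and the broadcast entry, and finally attach the PHY for
+ * MAC-PHY link interfaces.
+ */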
+static int gbe_slave_open(struct gbe_intf *gbe_intf)
+{
+ struct gbe_priv *priv = gbe_intf->gbe_dev;
+ struct gbe_slave *slave = gbe_intf->slave;
+ phy_interface_t phy_mode;
+ bool has_phy = false;
+ int err;
+
+ void (*hndlr)(struct net_device *) = gbe_adjust_link;
+
+ if (!IS_SS_ID_2U(priv))
+ gbe_sgmii_config(priv, slave);
+ gbe_port_reset(slave);
+ if (!IS_SS_ID_2U(priv))
+ gbe_sgmii_rtreset(priv, slave, false);
+ gbe_port_config(priv, slave, priv->rx_packet_max);
+ gbe_set_slave_mac(slave, gbe_intf);
+	/* For the NU and 2U switches, map all VLAN priorities to zero,
+	 * since only priority 0 is configured for use.
+	 */
+ if (IS_SS_ID_MU(priv))
+ writel(HOST_TX_PRI_MAP_DEFAULT,
+ GBE_REG_ADDR(slave, port_regs, rx_pri_map));
+
+ /* enable forwarding */
+ cpsw_ale_control_set(priv->ale, slave->port_num,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+ cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
+ 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
+
+ if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+ has_phy = true;
+ phy_mode = PHY_INTERFACE_MODE_SGMII;
+ slave->phy_port_t = PORT_MII;
+ } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
+ has_phy = true;
+ err = of_get_phy_mode(slave->node, &phy_mode);
+ /* if phy-mode is not present, default to
+ * PHY_INTERFACE_MODE_RGMII
+ */
+ if (err)
+ phy_mode = PHY_INTERFACE_MODE_RGMII;
+
+ if (!phy_interface_mode_is_rgmii(phy_mode)) {
+ dev_err(priv->dev,
+ "Unsupported phy mode %d\n", phy_mode);
+ return -EINVAL;
+ }
+ slave->phy_port_t = PORT_MII;
+ } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
+ has_phy = true;
+ phy_mode = PHY_INTERFACE_MODE_NA;
+ slave->phy_port_t = PORT_FIBRE;
+ }
+
+ if (has_phy) {
+ if (IS_SS_ID_XGBE(priv))
+ hndlr = xgbe_adjust_link;
+
+ slave->phy = of_phy_connect(gbe_intf->ndev,
+ slave->phy_node,
+ hndlr, 0,
+ phy_mode);
+ if (!slave->phy) {
+ dev_err(priv->dev, "phy not found on slave %d\n",
+ slave->slave_num);
+ return -ENODEV;
+ }
+		dev_dbg(priv->dev, "phy found: %s\n",
+ phydev_name(slave->phy));
+ phy_start(slave->phy);
+ }
+ return 0;
+}
+
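+/* Set up the host (CPU-facing) port: program the default TX priority
+ * map and RX max length, then start the ALE and configure the port 0
+ * forwarding, VLAN and flood masks. The ALE runs in bypass mode unless
+ * switching has been enabled (priv->enable_ale).
+ */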
+static void gbe_init_host_port(struct gbe_priv *priv)
+{
+ int bypass_en = 1;
+
+ /* Host Tx Pri */
+ if (IS_SS_ID_NU(priv) || IS_SS_ID_XGBE(priv))
+ writel(HOST_TX_PRI_MAP_DEFAULT,
+ GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
+
+ /* Max length register */
+ writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
+ rx_maxlen));
+
+ cpsw_ale_start(priv->ale);
+
+ if (priv->enable_ale)
+ bypass_en = 0;
+
+ cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
+
+ cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
+
+ cpsw_ale_control_set(priv->ale, priv->host_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNKNOWN_VLAN_MEMBER,
+ GBE_PORT_MASK(priv->ale_ports));
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNKNOWN_MCAST_FLOOD,
+ GBE_PORT_MASK(priv->ale_ports - 1));
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
+ GBE_PORT_MASK(priv->ale_ports));
+
+ cpsw_ale_control_set(priv->ale, 0,
+ ALE_PORT_UNTAGGED_EGRESS,
+ GBE_PORT_MASK(priv->ale_ports));
+}
+
+static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_add_mcast(gbe_dev->ale, addr,
+ GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
+ ALE_MCAST_FWD_2);
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+ cpsw_ale_add_mcast(gbe_dev->ale, addr,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
+ }
+}
+
+static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
+ cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+ ALE_VLAN, vlan_id);
+}
+
+static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
+
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+ cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
+ }
+}
+
+static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ u16 vlan_id;
+
+ cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
+
+ for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
+ cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
+ ALE_VLAN, vlan_id);
+ }
+}
+
+static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
+ naddr->addr, naddr->type);
+
+ switch (naddr->type) {
+ case ADDR_MCAST:
+ case ADDR_BCAST:
+ gbe_add_mcast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_UCAST:
+ case ADDR_DEV:
+ gbe_add_ucast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_ANY:
+ /* nothing to do for promiscuous */
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
+ naddr->addr, naddr->type);
+
+ switch (naddr->type) {
+ case ADDR_MCAST:
+ case ADDR_BCAST:
+ gbe_del_mcast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_UCAST:
+ case ADDR_DEV:
+ gbe_del_ucast_addr(gbe_intf, naddr->addr);
+ break;
+ case ADDR_ANY:
+ /* nothing to do for promiscuous */
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int gbe_add_vid(void *intf_priv, int vid)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ set_bit(vid, gbe_intf->active_vlans);
+
+ cpsw_ale_add_vlan(gbe_dev->ale, vid,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ GBE_MASK_NO_PORTS,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ GBE_PORT_MASK(gbe_dev->ale_ports - 1));
+
+ return 0;
+}
+
+static int gbe_del_vid(void *intf_priv, int vid)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
+ clear_bit(vid, gbe_intf->active_vlans);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+
+static void gbe_txtstamp(void *context, struct sk_buff *skb)
+{
+ struct gbe_intf *gbe_intf = context;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ cpts_tx_timestamp(gbe_dev->cpts, skb);
+}
+
+static bool gbe_need_txtstamp(struct gbe_intf *gbe_intf,
+ const struct netcp_packet *p_info)
+{
+ struct sk_buff *skb = p_info->skb;
+
+ return cpts_can_timestamp(gbe_intf->gbe_dev->cpts, skb);
+}
+
+static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
+ struct netcp_packet *p_info)
+{
+ struct phy_device *phydev = p_info->skb->dev->phydev;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) ||
+ !gbe_dev->tx_ts_enabled)
+ return 0;
+
+	/* If the PHY provides the txtstamp API, assume it will handle
+	 * TX timestamping. Mark the skb here because skb_tx_timestamp()
+	 * is only called after all the tx hooks have run.
+	 */
+ if (phy_has_txtstamp(phydev)) {
+ skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ return 0;
+ }
+
+ if (gbe_need_txtstamp(gbe_intf, p_info)) {
+ p_info->txtstamp = gbe_txtstamp;
+ p_info->ts_context = (void *)gbe_intf;
+ skb_shinfo(p_info->skb)->tx_flags |= SKBTX_IN_PROGRESS;
+ }
+
+ return 0;
+}
+
+static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info)
+{
+ struct phy_device *phydev = p_info->skb->dev->phydev;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ if (p_info->rxtstamp_complete)
+ return 0;
+
+ if (phy_has_rxtstamp(phydev)) {
+ p_info->rxtstamp_complete = true;
+ return 0;
+ }
+
+ if (gbe_dev->rx_ts_enabled)
+ cpts_rx_timestamp(gbe_dev->cpts, p_info->skb);
+
+ p_info->rxtstamp_complete = true;
+
+ return 0;
+}
+
+static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct cpts *cpts = gbe_dev->cpts;
+ struct hwtstamp_config cfg;
+
+ if (!cpts)
+ return -EOPNOTSUPP;
+
+ cfg.flags = 0;
+ cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+ cfg.rx_filter = gbe_dev->rx_ts_enabled;
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
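+/* Program the per-port time sync registers: match PTP (ETH_P_1588)
+ * event messages and enable the TX and/or RX timestamping controls
+ * for whichever directions are currently enabled.
+ */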
+static void gbe_hwtstamp(struct gbe_intf *gbe_intf)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct gbe_slave *slave = gbe_intf->slave;
+ u32 ts_en, seq_id, ctl;
+
+ if (!gbe_dev->rx_ts_enabled &&
+ !gbe_dev->tx_ts_enabled) {
+ writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl));
+ return;
+ }
+
+ seq_id = (30 << TS_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
+ ts_en = EVENT_MSG_BITS << TS_MSG_TYPE_EN_SHIFT;
+ ctl = ETH_P_1588 | TS_TTL_NONZERO |
+ (slave->ts_ctl.dst_port_map << TS_CTL_DST_PORT_SHIFT) |
+ (slave->ts_ctl.uni ? TS_UNI_EN :
+ slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT);
+
+ if (gbe_dev->tx_ts_enabled)
+ ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN);
+
+ if (gbe_dev->rx_ts_enabled)
+ ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN);
+
+ writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl));
+ writel(seq_id, GBE_REG_ADDR(slave, port_regs, ts_seq_ltype));
+ writel(ctl, GBE_REG_ADDR(slave, port_regs, ts_ctl_ltype2));
+}
+
+static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr)
+{
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct cpts *cpts = gbe_dev->cpts;
+ struct hwtstamp_config cfg;
+
+ if (!cpts)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
+ return -EFAULT;
+
+ switch (cfg.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ gbe_dev->tx_ts_enabled = 0;
+ break;
+ case HWTSTAMP_TX_ON:
+ gbe_dev->tx_ts_enabled = 1;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ switch (cfg.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE;
+ break;
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
+ break;
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ gbe_hwtstamp(gbe_intf);
+
+ return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
+}
+
+static void gbe_register_cpts(struct gbe_priv *gbe_dev)
+{
+ if (!gbe_dev->cpts)
+ return;
+
+ if (gbe_dev->cpts_registered > 0)
+ goto done;
+
+ if (cpts_register(gbe_dev->cpts)) {
+ dev_err(gbe_dev->dev, "error registering cpts device\n");
+ return;
+ }
+
+done:
+ ++gbe_dev->cpts_registered;
+}
+
+static void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
+{
+ if (!gbe_dev->cpts || (gbe_dev->cpts_registered <= 0))
+ return;
+
+ if (--gbe_dev->cpts_registered)
+ return;
+
+ cpts_unregister(gbe_dev->cpts);
+}
+#else
+static inline int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf,
+ struct netcp_packet *p_info)
+{
+ return 0;
+}
+
+static inline int gbe_rxtstamp(struct gbe_intf *gbe_intf,
+ struct netcp_packet *p_info)
+{
+ return 0;
+}
+
+static inline int gbe_hwtstamp(struct gbe_intf *gbe_intf,
+ struct ifreq *ifr, int cmd)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void gbe_register_cpts(struct gbe_priv *gbe_dev)
+{
+}
+
+static inline void gbe_unregister_cpts(struct gbe_priv *gbe_dev)
+{
+}
+
+static inline int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *req)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *req)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_TI_CPTS */
+
+static int gbe_set_rx_mode(void *intf_priv, bool promisc)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct cpsw_ale *ale = gbe_dev->ale;
+ unsigned long timeout;
+ int i, ret = -ETIMEDOUT;
+
+	/* Disable (1) or enable (0) address learning for all ports
+	 * (host is port 0, slaves are ports 1 and up).
+	 */
+ for (i = 0; i <= gbe_dev->num_slaves; i++) {
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NOLEARN, !!promisc);
+ cpsw_ale_control_set(ale, i,
+ ALE_PORT_NO_SA_UPDATE, !!promisc);
+ }
+
+ if (!promisc) {
+ /* Don't Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
+ dev_vdbg(gbe_dev->dev, "promiscuous mode disabled\n");
+ return 0;
+ }
+
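+	/* Entering promiscuous mode: trigger an ALE age-out so stale
+	 * learned entries are cleared, flush the multicast entries and
+	 * flood unknown unicast traffic to the host port.
+	 */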
+ timeout = jiffies + HZ;
+
+ /* Clear All Untouched entries */
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+ do {
+ cpu_relax();
+ if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT)) {
+ ret = 0;
+ break;
+ }
+
+ } while (time_after(timeout, jiffies));
+
+ /* Make sure it is not a false timeout */
+ if (ret && !cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
+ return ret;
+
+ cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
+
+ /* Clear all mcast from ALE */
+ cpsw_ale_flush_multicast(ale,
+ GBE_PORT_MASK(gbe_dev->ale_ports),
+ -1);
+
+ /* Flood All Unicast Packets to Host port */
+ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
+ dev_vdbg(gbe_dev->dev, "promiscuous mode enabled\n");
+ return ret;
+}
+
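+/* Hardware timestamping ioctls are handled by the switch/CPTS only
+ * when the attached PHY does not implement its own hwtstamp support;
+ * everything else is passed through to phy_mii_ioctl().
+ */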
+static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct phy_device *phy = gbe_intf->slave->phy;
+
+ if (!phy_has_hwtstamp(phy)) {
+ switch (cmd) {
+ case SIOCGHWTSTAMP:
+ return gbe_hwtstamp_get(gbe_intf, req);
+ case SIOCSHWTSTAMP:
+ return gbe_hwtstamp_set(gbe_intf, req);
+ }
+ }
+
+ if (phy)
+ return phy_mii_ioctl(phy, req, cmd);
+
+ return -EOPNOTSUPP;
+}
+
+static void netcp_ethss_timer(struct timer_list *t)
+{
+ struct gbe_priv *gbe_dev = from_timer(gbe_dev, t, timer);
+ struct gbe_intf *gbe_intf;
+ struct gbe_slave *slave;
+
+ /* Check & update SGMII link state of interfaces */
+ for_each_intf(gbe_intf, gbe_dev) {
+ if (!gbe_intf->slave->open)
+ continue;
+ netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
+ gbe_intf->ndev);
+ }
+
+ /* Check & update SGMII link state of secondary ports */
+ for_each_sec_slave(slave, gbe_dev) {
+ netcp_ethss_update_link_state(gbe_dev, slave, NULL);
+ }
+
+	/* The timer callback already runs in BH context, so a plain
+	 * spin_lock() is sufficient here.
+	 */
+ spin_lock(&gbe_dev->hw_stats_lock);
+
+ if (IS_SS_ID_VER_14(gbe_dev))
+ gbe_update_stats_ver14(gbe_dev, NULL);
+ else
+ gbe_update_stats(gbe_dev, NULL);
+
+ spin_unlock(&gbe_dev->hw_stats_lock);
+
+ gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
+ add_timer(&gbe_dev->timer);
+}
+
+static int gbe_txhook(int order, void *data, struct netcp_packet *p_info)
+{
+ struct gbe_intf *gbe_intf = data;
+
+ p_info->tx_pipe = &gbe_intf->tx_pipe;
+
+ return gbe_txtstamp_mark_pkt(gbe_intf, p_info);
+}
+
+static int gbe_rxhook(int order, void *data, struct netcp_packet *p_info)
+{
+ struct gbe_intf *gbe_intf = data;
+
+ return gbe_rxtstamp(gbe_intf, p_info);
+}
+
+static int gbe_open(void *intf_priv, struct net_device *ndev)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_slave *slave = gbe_intf->slave;
+ int port_num = slave->port_num;
+ u32 reg, val;
+ int ret;
+
+ reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
+ dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
+ GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
+ GBE_RTL_VERSION(reg), GBE_IDENT(reg));
+
+	/* For 10G and on NetCP 1.5, use directed-to-port mode: the egress
+	 * switch port is carried in the packet tag info.
+	 */
+ if (IS_SS_ID_XGBE(gbe_dev) || IS_SS_ID_MU(gbe_dev))
+ gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
+
+ if (gbe_dev->enable_ale)
+ gbe_intf->tx_pipe.switch_to_port = 0;
+ else
+ gbe_intf->tx_pipe.switch_to_port = port_num;
+
+ dev_dbg(gbe_dev->dev,
+		"opened TX channel %s: %p, switch-to-port %d, flags %d\n",
+ gbe_intf->tx_pipe.dma_chan_name,
+ gbe_intf->tx_pipe.dma_channel,
+ gbe_intf->tx_pipe.switch_to_port,
+ gbe_intf->tx_pipe.flags);
+
+ gbe_slave_stop(gbe_intf);
+
+	/* disable priority elevation; stats on all ports are enabled below */
+ writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
+
+ /* Control register */
+ val = GBE_CTL_P0_ENABLE;
+ if (IS_SS_ID_MU(gbe_dev)) {
+ val |= ETH_SW_CTL_P0_TX_CRC_REMOVE;
+ netcp->hw_cap = ETH_SW_CAN_REMOVE_ETH_FCS;
+ }
+ writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, control));
+
+ /* All statistics enabled and STAT AB visible by default */
+ writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
+ stat_port_en));
+
+ ret = gbe_slave_open(gbe_intf);
+ if (ret)
+ goto fail;
+
+ netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
+ netcp_register_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
+
+ slave->open = true;
+ netcp_ethss_update_link_state(gbe_dev, slave, ndev);
+
+ gbe_register_cpts(gbe_dev);
+
+ return 0;
+
+fail:
+ gbe_slave_stop(gbe_intf);
+ return ret;
+}
+
+static int gbe_close(void *intf_priv, struct net_device *ndev)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+ struct netcp_intf *netcp = netdev_priv(ndev);
+ struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
+
+ gbe_unregister_cpts(gbe_dev);
+
+ gbe_slave_stop(gbe_intf);
+
+ netcp_unregister_rxhook(netcp, GBE_RXHOOK_ORDER, gbe_rxhook, gbe_intf);
+ netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_txhook, gbe_intf);
+
+ gbe_intf->slave->open = false;
+ atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_TI_CPTS)
+static void init_slave_ts_ctl(struct gbe_slave *slave)
+{
+ slave->ts_ctl.uni = 1;
+ slave->ts_ctl.dst_port_map =
+ (TS_CTL_DST_PORT >> TS_CTL_DST_PORT_SHIFT) & 0x3;
+ slave->ts_ctl.maddr_map =
+ (TS_CTL_MADDR_ALL >> TS_CTL_MADDR_SHIFT) & 0x1f;
+}
+
+#else
+static void init_slave_ts_ctl(struct gbe_slave *slave)
+{
+}
+#endif /* CONFIG_TI_CPTS */
+
+static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
+ struct device_node *node)
+{
+ int port_reg_num;
+ u32 port_reg_ofs, emac_reg_ofs;
+ u32 port_reg_blk_sz, emac_reg_blk_sz;
+
+ if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
+ dev_err(gbe_dev->dev, "missing slave-port parameter\n");
+ return -EINVAL;
+ }
+
+ if (of_property_read_u32(node, "link-interface",
+ &slave->link_interface)) {
+ dev_warn(gbe_dev->dev,
+ "missing link-interface value defaulting to 1G mac-phy link\n");
+ slave->link_interface = SGMII_LINK_MAC_PHY;
+ }
+
+ slave->node = node;
+ slave->open = false;
+ if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+ (slave->link_interface == RGMII_LINK_MAC_PHY) ||
+ (slave->link_interface == XGMII_LINK_MAC_PHY))
+ slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
+
+ if (slave->link_interface >= XGMII_LINK_MAC_PHY)
+ slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
+ else
+ slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
+
+	/* The EMAC register blocks are contiguous in the memory map, but
+	 * the port register blocks are not.
+	 */
+ port_reg_num = slave->slave_num;
+ if (IS_SS_ID_VER_14(gbe_dev)) {
+ if (slave->slave_num > 1) {
+ port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
+ port_reg_num -= 2;
+ } else {
+ port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
+ }
+ emac_reg_ofs = GBE13_EMAC_OFFSET;
+ port_reg_blk_sz = 0x30;
+ emac_reg_blk_sz = 0x40;
+ } else if (IS_SS_ID_MU(gbe_dev)) {
+ port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
+ emac_reg_ofs = GBENU_EMAC_OFFSET;
+ port_reg_blk_sz = 0x1000;
+ emac_reg_blk_sz = 0x1000;
+ } else if (IS_SS_ID_XGBE(gbe_dev)) {
+ port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
+ emac_reg_ofs = XGBE10_EMAC_OFFSET;
+ port_reg_blk_sz = 0x30;
+ emac_reg_blk_sz = 0x40;
+ } else {
+ dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
+ gbe_dev->ss_version);
+ return -EINVAL;
+ }
+
+ slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
+ (port_reg_blk_sz * port_reg_num);
+ slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
+ (emac_reg_blk_sz * slave->slave_num);
+
+ if (IS_SS_ID_VER_14(gbe_dev)) {
+ /* Initialize slave port register offsets */
+ GBE_SET_REG_OFS(slave, port_regs, port_vlan);
+ GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ GBE_SET_REG_OFS(slave, port_regs, sa_lo);
+ GBE_SET_REG_OFS(slave, port_regs, sa_hi);
+ GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+ GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+ GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+ GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+ GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+ /* Initialize EMAC register offsets */
+ GBE_SET_REG_OFS(slave, emac_regs, mac_control);
+ GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+ GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+
+ } else if (IS_SS_ID_MU(gbe_dev)) {
+ /* Initialize slave port register offsets */
+ GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
+ GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ GBENU_SET_REG_OFS(slave, port_regs, rx_pri_map);
+ GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
+ GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+ GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
+ GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
+
+ /* Initialize EMAC register offsets */
+ GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
+ GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
+
+ } else if (IS_SS_ID_XGBE(gbe_dev)) {
+ /* Initialize slave port register offsets */
+ XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
+ XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
+ XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
+ XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
+ XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
+
+ /* Initialize EMAC register offsets */
+ XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
+ XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
+ XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
+ }
+
+ atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
+
+ init_slave_ts_ctl(slave);
+ return 0;
+}
+
+static void init_secondary_ports(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct device *dev = gbe_dev->dev;
+ phy_interface_t phy_mode;
+ struct gbe_priv **priv;
+ struct device_node *port;
+ struct gbe_slave *slave;
+ bool mac_phy_link = false;
+
+ for_each_child_of_node(node, port) {
+ slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ dev_err(dev, "memory alloc failed for secondary port(%pOFn), skipping...\n",
+ port);
+ continue;
+ }
+
+ if (init_slave(gbe_dev, slave, port)) {
+ dev_err(dev,
+ "Failed to initialize secondary port(%pOFn), skipping...\n",
+ port);
+ devm_kfree(dev, slave);
+ continue;
+ }
+
+ if (!IS_SS_ID_2U(gbe_dev))
+ gbe_sgmii_config(gbe_dev, slave);
+ gbe_port_reset(slave);
+ gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
+ list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
+ gbe_dev->num_slaves++;
+ if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
+ (slave->link_interface == XGMII_LINK_MAC_PHY))
+ mac_phy_link = true;
+
+ slave->open = true;
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+ of_node_put(port);
+ break;
+ }
+ }
+
+ /* of_phy_connect() is needed only for MAC-PHY interface */
+ if (!mac_phy_link)
+ return;
+
+ /* Allocate dummy netdev device for attaching to phy device */
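+	/* Its private area only needs to hold a pointer back to gbe_dev,
+	 * hence the pointer-sized (sizeof(gbe_dev)) allocation.
+	 */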
+ gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
+ NET_NAME_UNKNOWN, ether_setup);
+ if (!gbe_dev->dummy_ndev) {
+ dev_err(dev,
+ "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
+ return;
+ }
+ priv = netdev_priv(gbe_dev->dummy_ndev);
+ *priv = gbe_dev;
+
+ if (slave->link_interface == SGMII_LINK_MAC_PHY) {
+ phy_mode = PHY_INTERFACE_MODE_SGMII;
+ slave->phy_port_t = PORT_MII;
+ } else if (slave->link_interface == RGMII_LINK_MAC_PHY) {
+ phy_mode = PHY_INTERFACE_MODE_RGMII;
+ slave->phy_port_t = PORT_MII;
+ } else {
+ phy_mode = PHY_INTERFACE_MODE_NA;
+ slave->phy_port_t = PORT_FIBRE;
+ }
+
+ for_each_sec_slave(slave, gbe_dev) {
+ if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != RGMII_LINK_MAC_PHY) &&
+ (slave->link_interface != XGMII_LINK_MAC_PHY))
+ continue;
+ slave->phy =
+ of_phy_connect(gbe_dev->dummy_ndev,
+ slave->phy_node,
+ gbe_adjust_link_sec_slaves,
+ 0, phy_mode);
+ if (!slave->phy) {
+ dev_err(dev, "phy not found for slave %d\n",
+ slave->slave_num);
+ } else {
+			dev_dbg(dev, "phy found: %s\n",
+ phydev_name(slave->phy));
+ phy_start(slave->phy);
+ }
+ }
+}
+
+static void free_secondary_ports(struct gbe_priv *gbe_dev)
+{
+ struct gbe_slave *slave;
+
+ while (!list_empty(&gbe_dev->secondary_slaves)) {
+ slave = first_sec_slave(gbe_dev);
+
+ if (slave->phy)
+ phy_disconnect(slave->phy);
+ list_del(&slave->slave_list);
+ }
+ if (gbe_dev->dummy_ndev)
+ free_netdev(gbe_dev->dummy_ndev);
+}
+
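+/* Map the XGBE subsystem, switch-module and SerDes register regions
+ * described in the device tree, allocate the hardware statistics
+ * shadow arrays and fill in the register offset tables.
+ */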
+static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret, i;
+
+ ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't xlate xgbe of node(%pOFn) ss address at %d\n",
+ node, XGBE_SS_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->ss_regs = regs;
+
+ ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't xlate xgbe of node(%pOFn) sm address at %d\n",
+ node, XGBE_SM_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->switch_regs = regs;
+
+ ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't xlate xgbe serdes of node(%pOFn) address at %d\n",
+ node, XGBE_SERDES_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->xgbe_serdes_regs = regs;
+
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = xgbe10_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
+
+ gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats, sizeof(u64),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats) {
+ dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ gbe_dev->hw_stats_prev =
+ devm_kcalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats, sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ gbe_dev->ss_version = XGBE_SS_VERSION_10;
+ gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
+ XGBE10_SGMII_MODULE_OFFSET;
+ gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
+
+ for (i = 0; i < gbe_dev->max_num_ports; i++)
+ gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
+ XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
+
+ gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
+ gbe_dev->cpts_reg = gbe_dev->switch_regs + XGBE10_CPTS_OFFSET;
+ gbe_dev->ale_ports = gbe_dev->max_num_ports;
+ gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
+ gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
+
+ /* Subsystem registers */
+ XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+ XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
+
+ /* Switch module registers */
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+ /* Host port registers */
+ XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+ XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+ XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+ return 0;
+}
+
+static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int ret;
+
+ ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of node(%pOFn) of gbe ss address at %d\n",
+ node, GBE_SS_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->ss_regs = regs;
+ gbe_dev->ss_version = readl(gbe_dev->ss_regs);
+ return 0;
+}
+
+static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int i, ret;
+
+ ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of gbe node(%pOFn) address at index %d\n",
+ node, GBE_SGMII34_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev,
+ "Failed to map gbe sgmii port34 register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->sgmii_port34_regs = regs;
+
+ ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of gbe node(%pOFn) address at index %d\n",
+ node, GBE_SM_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev,
+ "Failed to map gbe switch module register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->switch_regs = regs;
+
+ gbe_dev->num_stats_mods = gbe_dev->max_num_slaves;
+ gbe_dev->et_stats = gbe13_et_stats;
+ gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
+
+ gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats, sizeof(u64),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats) {
+ dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ gbe_dev->hw_stats_prev =
+ devm_kcalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats, sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
+ gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
+
+ /* K2HK has only 2 hw stats modules visible at a time, so
+ * module 0 & 2 points to one base and
+ * module 1 & 3 points to the other base
+ */
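+ /* Illustratively (with GBE_HW_STATS_REG_MAP_SZ as the per-module
+ * stride), the (i & 0x1) term below makes hw_stats_regs[0]/[2] point
+ * at switch_regs + GBE13_HW_STATS_OFFSET while hw_stats_regs[1]/[3]
+ * point at that base + GBE_HW_STATS_REG_MAP_SZ.
+ */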
+ for (i = 0; i < gbe_dev->max_num_slaves; i++) {
+ gbe_dev->hw_stats_regs[i] =
+ gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
+ (GBE_HW_STATS_REG_MAP_SZ * (i & 0x1));
+ }
+
+ gbe_dev->cpts_reg = gbe_dev->switch_regs + GBE13_CPTS_OFFSET;
+ gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
+ gbe_dev->ale_ports = gbe_dev->max_num_ports;
+ gbe_dev->host_port = GBE13_HOST_PORT_NUM;
+ gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
+
+ /* Subsystem registers */
+ GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+
+ /* Switch module registers */
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+ GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
+
+ /* Host port registers */
+ GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+ GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+ return 0;
+}
+
+static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
+ struct device_node *node)
+{
+ struct resource res;
+ void __iomem *regs;
+ int i, ret;
+
+ gbe_dev->num_stats_mods = gbe_dev->max_num_ports;
+ gbe_dev->et_stats = gbenu_et_stats;
+
+ if (IS_SS_ID_MU(gbe_dev))
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
+ else
+ gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
+ GBENU_ET_STATS_PORT_SIZE;
+
+ gbe_dev->hw_stats = devm_kcalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats, sizeof(u64),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats) {
+ dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ gbe_dev->hw_stats_prev =
+ devm_kcalloc(gbe_dev->dev,
+ gbe_dev->num_et_stats, sizeof(u32),
+ GFP_KERNEL);
+ if (!gbe_dev->hw_stats_prev) {
+ dev_err(gbe_dev->dev,
+ "hw_stats_prev memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
+ if (ret) {
+ dev_err(gbe_dev->dev,
+ "Can't translate of gbenu node(%pOFn) addr at index %d\n",
+ node, GBENU_SM_REG_INDEX);
+ return ret;
+ }
+
+ regs = devm_ioremap_resource(gbe_dev->dev, &res);
+ if (IS_ERR(regs)) {
+ dev_err(gbe_dev->dev,
+ "Failed to map gbenu switch module register base\n");
+ return PTR_ERR(regs);
+ }
+ gbe_dev->switch_regs = regs;
+
+ if (!IS_SS_ID_2U(gbe_dev))
+ gbe_dev->sgmii_port_regs =
+ gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
+
+ /* Although sgmii modules are mem mapped to one contiguous
+ * region on GBENU devices, setting sgmii_port34_regs allows
+ * consistent code when accessing sgmii api
+ */
+ gbe_dev->sgmii_port34_regs = gbe_dev->sgmii_port_regs +
+ (2 * GBENU_SGMII_MODULE_SIZE);
+
+ gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
+
+ for (i = 0; i < (gbe_dev->max_num_ports); i++)
+ gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
+ GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
+
+ gbe_dev->cpts_reg = gbe_dev->switch_regs + GBENU_CPTS_OFFSET;
+ gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
+ gbe_dev->ale_ports = gbe_dev->max_num_ports;
+ gbe_dev->host_port = GBENU_HOST_PORT_NUM;
+ gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
+
+ /* Subsystem registers */
+ GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
+ /* ok to set for MU, but used by 2U only */
+ GBENU_SET_REG_OFS(gbe_dev, ss_regs, rgmii_status);
+
+ /* Switch module registers */
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
+ GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
+
+ /* Host port registers */
+ GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
+ GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
+
+ /* For NU only. 2U does not need tx_pri_map.
+ * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
+ * while 2U has only 1 such thread
+ */
+ GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
+ return 0;
+}
+
+static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
+ struct device_node *node, void **inst_priv)
+{
+ struct device_node *interfaces, *interface, *cpts_node;
+ struct device_node *secondary_ports;
+ struct cpsw_ale_params ale_params;
+ struct gbe_priv *gbe_dev;
+ u32 slave_num;
+ int i, ret = 0;
+
+ if (!node) {
+ dev_err(dev, "device tree info unavailable\n");
+ return -ENODEV;
+ }
+
+ gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
+ if (!gbe_dev)
+ return -ENOMEM;
+
+ if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
+ of_device_is_compatible(node, "ti,netcp-gbe")) {
+ gbe_dev->max_num_slaves = 4;
+ } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
+ gbe_dev->max_num_slaves = 8;
+ } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
+ gbe_dev->max_num_slaves = 1;
+ gbe_module.set_rx_mode = gbe_set_rx_mode;
+ } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
+ gbe_dev->max_num_slaves = 2;
+ } else {
+ dev_err(dev, "device tree node for unknown device\n");
+ return -EINVAL;
+ }
+ gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
+
+ gbe_dev->dev = dev;
+ gbe_dev->netcp_device = netcp_device;
+ gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
+
+ /* init the hw stats lock */
+ spin_lock_init(&gbe_dev->hw_stats_lock);
+
+ if (of_find_property(node, "enable-ale", NULL)) {
+ gbe_dev->enable_ale = true;
+ dev_info(dev, "ALE enabled\n");
+ } else {
+ gbe_dev->enable_ale = false;
+ dev_dbg(dev, "ALE bypass enabled\n");
+ }
+
+ ret = of_property_read_u32(node, "tx-queue",
+ &gbe_dev->tx_queue_id);
+ if (ret < 0) {
+ dev_err(dev, "missing tx-queue parameter\n");
+ gbe_dev->tx_queue_id = GBE_TX_QUEUE;
+ }
+
+ ret = of_property_read_string(node, "tx-channel",
+ &gbe_dev->dma_chan_name);
+ if (ret < 0) {
+ dev_err(dev, "missing \"tx-channel\" parameter\n");
+ return -EINVAL;
+ }
+
+ if (of_node_name_eq(node, "gbe")) {
+ ret = get_gbe_resource_version(gbe_dev, node);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
+
+ if (IS_SS_ID_VER_14(gbe_dev))
+ ret = set_gbe_ethss14_priv(gbe_dev, node);
+ else if (IS_SS_ID_MU(gbe_dev))
+ ret = set_gbenu_ethss_priv(gbe_dev, node);
+ else
+ ret = -ENODEV;
+
+ } else if (of_node_name_eq(node, "xgbe")) {
+ ret = set_xgbe_ethss10_priv(gbe_dev, node);
+ if (ret)
+ return ret;
+ ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
+ gbe_dev->ss_regs);
+ } else {
+ dev_err(dev, "unknown GBE node(%pOFn)\n", node);
+ ret = -ENODEV;
+ }
+
+ if (ret)
+ return ret;
+
+ interfaces = of_get_child_by_name(node, "interfaces");
+ if (!interfaces)
+ dev_err(dev, "could not find interfaces\n");
+
+ ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
+ gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
+ if (ret) {
+ of_node_put(interfaces);
+ return ret;
+ }
+
+ ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
+ if (ret) {
+ of_node_put(interfaces);
+ return ret;
+ }
+
+ /* Create network interfaces */
+ INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
+ for_each_child_of_node(interfaces, interface) {
+ ret = of_property_read_u32(interface, "slave-port", &slave_num);
+ if (ret) {
+ dev_err(dev, "missing slave-port parameter, skipping interface configuration for %pOFn\n",
+ interface);
+ continue;
+ }
+ gbe_dev->num_slaves++;
+ if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) {
+ of_node_put(interface);
+ break;
+ }
+ }
+ of_node_put(interfaces);
+
+ if (!gbe_dev->num_slaves)
+ dev_warn(dev, "No network interface configured\n");
+
+ /* Initialize Secondary slave ports */
+ secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
+ INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
+ if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
+ init_secondary_ports(gbe_dev, secondary_ports);
+ of_node_put(secondary_ports);
+
+ if (!gbe_dev->num_slaves) {
+ dev_err(dev,
+ "No network interface or secondary ports configured\n");
+ ret = -ENODEV;
+ goto free_sec_ports;
+ }
+
+ memset(&ale_params, 0, sizeof(ale_params));
+ ale_params.dev = gbe_dev->dev;
+ ale_params.ale_regs = gbe_dev->ale_reg;
+ ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
+ ale_params.ale_ports = gbe_dev->ale_ports;
+ ale_params.dev_id = "cpsw";
+ if (IS_SS_ID_NU(gbe_dev))
+ ale_params.dev_id = "66ak2el";
+ else if (IS_SS_ID_2U(gbe_dev))
+ ale_params.dev_id = "66ak2g";
+ else if (IS_SS_ID_XGBE(gbe_dev))
+ ale_params.dev_id = "66ak2h-xgbe";
+
+ gbe_dev->ale = cpsw_ale_create(&ale_params);
+ if (IS_ERR(gbe_dev->ale)) {
+ dev_err(gbe_dev->dev, "error initializing ale engine\n");
+ ret = PTR_ERR(gbe_dev->ale);
+ goto free_sec_ports;
+ } else {
+ dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
+ }
+
+ cpts_node = of_get_child_by_name(node, "cpts");
+ if (!cpts_node)
+ cpts_node = of_node_get(node);
+
+ gbe_dev->cpts = cpts_create(gbe_dev->dev, gbe_dev->cpts_reg,
+ cpts_node, 0);
+ of_node_put(cpts_node);
+ if (IS_ENABLED(CONFIG_TI_CPTS) && IS_ERR(gbe_dev->cpts)) {
+ ret = PTR_ERR(gbe_dev->cpts);
+ goto free_sec_ports;
+ }
+
+ /* initialize host port */
+ gbe_init_host_port(gbe_dev);
+
+ spin_lock_bh(&gbe_dev->hw_stats_lock);
+ for (i = 0; i < gbe_dev->num_stats_mods; i++) {
+ if (IS_SS_ID_VER_14(gbe_dev))
+ gbe_reset_mod_stats_ver14(gbe_dev, i);
+ else
+ gbe_reset_mod_stats(gbe_dev, i);
+ }
+ spin_unlock_bh(&gbe_dev->hw_stats_lock);
+
+ timer_setup(&gbe_dev->timer, netcp_ethss_timer, 0);
+ gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
+ add_timer(&gbe_dev->timer);
+ *inst_priv = gbe_dev;
+ return 0;
+
+free_sec_ports:
+ free_secondary_ports(gbe_dev);
+ return ret;
+}
+
+static int gbe_attach(void *inst_priv, struct net_device *ndev,
+ struct device_node *node, void **intf_priv)
+{
+ struct gbe_priv *gbe_dev = inst_priv;
+ struct gbe_intf *gbe_intf;
+ int ret;
+
+ if (!node) {
+ dev_err(gbe_dev->dev, "interface node not available\n");
+ return -ENODEV;
+ }
+
+ gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
+ if (!gbe_intf)
+ return -ENOMEM;
+
+ gbe_intf->ndev = ndev;
+ gbe_intf->dev = gbe_dev->dev;
+ gbe_intf->gbe_dev = gbe_dev;
+
+ gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
+ sizeof(*gbe_intf->slave),
+ GFP_KERNEL);
+ if (!gbe_intf->slave) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ if (init_slave(gbe_dev, gbe_intf->slave, node)) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ gbe_intf->tx_pipe = gbe_dev->tx_pipe;
+ ndev->ethtool_ops = &keystone_ethtool_ops;
+ list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
+ *intf_priv = gbe_intf;
+ return 0;
+
+fail:
+ if (gbe_intf->slave)
+ devm_kfree(gbe_dev->dev, gbe_intf->slave);
+ if (gbe_intf)
+ devm_kfree(gbe_dev->dev, gbe_intf);
+ return ret;
+}
+
+static int gbe_release(void *intf_priv)
+{
+ struct gbe_intf *gbe_intf = intf_priv;
+
+ gbe_intf->ndev->ethtool_ops = NULL;
+ list_del(&gbe_intf->gbe_intf_list);
+ devm_kfree(gbe_intf->dev, gbe_intf->slave);
+ devm_kfree(gbe_intf->dev, gbe_intf);
+ return 0;
+}
+
+static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
+{
+ struct gbe_priv *gbe_dev = inst_priv;
+
+ del_timer_sync(&gbe_dev->timer);
+ cpts_release(gbe_dev->cpts);
+ cpsw_ale_stop(gbe_dev->ale);
+ netcp_txpipe_close(&gbe_dev->tx_pipe);
+ free_secondary_ports(gbe_dev);
+
+ if (!list_empty(&gbe_dev->gbe_intf_head))
+ dev_alert(gbe_dev->dev,
+ "unreleased ethss interfaces present\n");
+
+ return 0;
+}
+
+static struct netcp_module gbe_module = {
+ .name = GBE_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .primary = true,
+ .probe = gbe_probe,
+ .open = gbe_open,
+ .close = gbe_close,
+ .remove = gbe_remove,
+ .attach = gbe_attach,
+ .release = gbe_release,
+ .add_addr = gbe_add_addr,
+ .del_addr = gbe_del_addr,
+ .add_vid = gbe_add_vid,
+ .del_vid = gbe_del_vid,
+ .ioctl = gbe_ioctl,
+};
+
+static struct netcp_module xgbe_module = {
+ .name = XGBE_MODULE_NAME,
+ .owner = THIS_MODULE,
+ .primary = true,
+ .probe = gbe_probe,
+ .open = gbe_open,
+ .close = gbe_close,
+ .remove = gbe_remove,
+ .attach = gbe_attach,
+ .release = gbe_release,
+ .add_addr = gbe_add_addr,
+ .del_addr = gbe_del_addr,
+ .add_vid = gbe_add_vid,
+ .del_vid = gbe_del_vid,
+ .ioctl = gbe_ioctl,
+};
+
+static int __init keystone_gbe_init(void)
+{
+ int ret;
+
+ ret = netcp_register_module(&gbe_module);
+ if (ret)
+ return ret;
+
+ ret = netcp_register_module(&xgbe_module);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+module_init(keystone_gbe_init);
+
+static void __exit keystone_gbe_exit(void)
+{
+ netcp_unregister_module(&gbe_module);
+ netcp_unregister_module(&xgbe_module);
+}
+module_exit(keystone_gbe_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
+MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
new file mode 100644
index 000000000..f7cf56d63
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SGMII module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * Sandeep Paulraj <s-paulraj@ti.com>
+ * Wingman Kwok <w-kwok2@ti.com>
+ *
+ */
+
+#include "netcp.h"
+
+#define SGMII_SRESET_RESET BIT(0)
+#define SGMII_SRESET_RTRESET BIT(1)
+
+#define SGMII_REG_STATUS_LOCK BIT(4)
+#define SGMII_REG_STATUS_LINK BIT(0)
+#define SGMII_REG_STATUS_AUTONEG BIT(2)
+#define SGMII_REG_CONTROL_AUTONEG BIT(0)
+
+#define SGMII23_OFFSET(x) (((x) - 2) * 0x100)
+#define SGMII_OFFSET(x) (((x) <= 1) ? ((x) * 0x100) : SGMII23_OFFSET(x))
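+
+/* Worked example of the offsets above (ports are 0 based): port 0 maps to
+ * 0x000 and port 1 to 0x100 within the module passed in as sgmii_ofs, while
+ * ports 2 and 3 fold back to 0x000 and 0x100 again, presumably because the
+ * caller passes the second SGMII module base (sgmii_port34_regs in
+ * netcp_ethss.c) for those ports.
+ */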
+
+/* SGMII registers */
+#define SGMII_SRESET_REG(x) (SGMII_OFFSET(x) + 0x004)
+#define SGMII_CTL_REG(x) (SGMII_OFFSET(x) + 0x010)
+#define SGMII_STATUS_REG(x) (SGMII_OFFSET(x) + 0x014)
+#define SGMII_MRADV_REG(x) (SGMII_OFFSET(x) + 0x018)
+
+static void sgmii_write_reg(void __iomem *base, int reg, u32 val)
+{
+ writel(val, base + reg);
+}
+
+static u32 sgmii_read_reg(void __iomem *base, int reg)
+{
+ return readl(base + reg);
+}
+
+static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
+{
+ writel((readl(base + reg) | val), base + reg);
+}
+
+/* port is 0 based */
+int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
+{
+ /* Soft reset */
+ sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
+ SGMII_SRESET_RESET);
+
+ while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
+ SGMII_SRESET_RESET) != 0x0)
+ ;
+
+ return 0;
+}
+
+/* port is 0 based */
+bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
+{
+ u32 reg;
+ bool oldval;
+
+ /* Initiate a soft reset */
+ reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
+ oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
+ if (set)
+ reg |= SGMII_SRESET_RTRESET;
+ else
+ reg &= ~SGMII_SRESET_RTRESET;
+ sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
+ wmb();
+
+ return oldval;
+}
+
+int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
+{
+ u32 status = 0, link = 0;
+
+ status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+ if ((status & SGMII_REG_STATUS_LINK) != 0)
+ link = 1;
+ return link;
+}
+
+int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface)
+{
+ unsigned int i, status, mask;
+ u32 mr_adv_ability;
+ u32 control;
+
+ switch (interface) {
+ case SGMII_LINK_MAC_MAC_AUTONEG:
+ mr_adv_ability = 0x9801;
+ control = 0x21;
+ break;
+
+ case SGMII_LINK_MAC_PHY:
+ case SGMII_LINK_MAC_PHY_NO_MDIO:
+ mr_adv_ability = 1;
+ control = 1;
+ break;
+
+ case SGMII_LINK_MAC_MAC_FORCED:
+ mr_adv_ability = 0x9801;
+ control = 0x20;
+ break;
+
+ case SGMII_LINK_MAC_FIBER:
+ mr_adv_ability = 0x20;
+ control = 0x1;
+ break;
+
+ default:
+ WARN_ONCE(1, "Invalid sgmii interface: %d\n", interface);
+ return -EINVAL;
+ }
+
+ sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), 0);
+
+ /* Wait for the SerDes pll to lock */
+ for (i = 0; i < 1000; i++) {
+ usleep_range(1000, 2000);
+ status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+ if ((status & SGMII_REG_STATUS_LOCK) != 0)
+ break;
+ }
+
+ if ((status & SGMII_REG_STATUS_LOCK) == 0)
+ pr_err("serdes PLL not locked\n");
+
+ sgmii_write_reg(sgmii_ofs, SGMII_MRADV_REG(port), mr_adv_ability);
+ sgmii_write_reg(sgmii_ofs, SGMII_CTL_REG(port), control);
+
+ mask = SGMII_REG_STATUS_LINK;
+ if (control & SGMII_REG_CONTROL_AUTONEG)
+ mask |= SGMII_REG_STATUS_AUTONEG;
+
+ for (i = 0; i < 1000; i++) {
+ usleep_range(200, 500);
+ status = sgmii_read_reg(sgmii_ofs, SGMII_STATUS_REG(port));
+ if ((status & mask) == mask)
+ break;
+ }
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/ti/netcp_xgbepcsr.c b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
new file mode 100644
index 000000000..112778aed
--- /dev/null
+++ b/drivers/net/ethernet/ti/netcp_xgbepcsr.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * XGE PCSR module initialisation
+ *
+ * Copyright (C) 2014 Texas Instruments Incorporated
+ * Authors: Sandeep Nair <sandeep_n@ti.com>
+ * WingMan Kwok <w-kwok2@ti.com>
+ *
+ */
+#include "netcp.h"
+
+/* XGBE registers */
+#define XGBE_CTRL_OFFSET 0x0c
+#define XGBE_SGMII_1_OFFSET 0x0114
+#define XGBE_SGMII_2_OFFSET 0x0214
+
+/* PCS-R registers */
+#define PCSR_CPU_CTRL_OFFSET 0x1fd0
+#define POR_EN BIT(29)
+
+#define reg_rmw(addr, value, mask) \
+ writel(((readl(addr) & (~(mask))) | \
+ ((value) & (mask))), (addr))
+
+/* bit mask of width w at offset s */
+#define MASK_WID_SH(w, s) (((1 << (w)) - 1) << (s))
+
+/* shift value v to offset s */
+#define VAL_SH(v, s) ((v) << (s))
+
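+/* Worked example of the helpers above: MASK_WID_SH(2, 1) builds the mask 0x6
+ * (a 2-bit field at bit offset 1) and VAL_SH(2, 1) shifts the value 2 into
+ * that field (0x4), so
+ *
+ *   reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
+ *
+ * as used in netcp_xgbe_serdes_reset_cdr() below, writes 0b10 into bits [2:1]
+ * of the register while leaving all other bits untouched.
+ */
+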
+#define PHY_A(serdes) 0
+
+struct serdes_cfg {
+ u32 ofs;
+ u32 val;
+ u32 mask;
+};
+
+static struct serdes_cfg cfg_phyb_1p25g_156p25mhz_cmu0[] = {
+ {0x0000, 0x00800002, 0x00ff00ff},
+ {0x0014, 0x00003838, 0x0000ffff},
+ {0x0060, 0x1c44e438, 0xffffffff},
+ {0x0064, 0x00c18400, 0x00ffffff},
+ {0x0068, 0x17078200, 0xffffff00},
+ {0x006c, 0x00000014, 0x000000ff},
+ {0x0078, 0x0000c000, 0x0000ff00},
+ {0x0000, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_156p25mhz_cmu1[] = {
+ {0x0c00, 0x00030002, 0x00ff00ff},
+ {0x0c14, 0x00005252, 0x0000ffff},
+ {0x0c28, 0x80000000, 0xff000000},
+ {0x0c2c, 0x000000f6, 0x000000ff},
+ {0x0c3c, 0x04000405, 0xff00ffff},
+ {0x0c40, 0xc0800000, 0xffff0000},
+ {0x0c44, 0x5a202062, 0xffffffff},
+ {0x0c48, 0x40040424, 0xffffffff},
+ {0x0c4c, 0x00004002, 0x0000ffff},
+ {0x0c50, 0x19001c00, 0xff00ff00},
+ {0x0c54, 0x00002100, 0x0000ff00},
+ {0x0c58, 0x00000060, 0x000000ff},
+ {0x0c60, 0x80131e7c, 0xffffffff},
+ {0x0c64, 0x8400cb02, 0xff00ffff},
+ {0x0c68, 0x17078200, 0xffffff00},
+ {0x0c6c, 0x00000016, 0x000000ff},
+ {0x0c74, 0x00000400, 0x0000ff00},
+ {0x0c78, 0x0000c000, 0x0000ff00},
+ {0x0c00, 0x00000003, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_16bit_lane[] = {
+ {0x0204, 0x00000080, 0x000000ff},
+ {0x0208, 0x0000920d, 0x0000ffff},
+ {0x0204, 0xfc000000, 0xff000000},
+ {0x0208, 0x00009104, 0x0000ffff},
+ {0x0210, 0x1a000000, 0xff000000},
+ {0x0214, 0x00006b58, 0x00ffffff},
+ {0x0218, 0x75800084, 0xffff00ff},
+ {0x022c, 0x00300000, 0x00ff0000},
+ {0x0230, 0x00003800, 0x0000ff00},
+ {0x024c, 0x008f0000, 0x00ff0000},
+ {0x0250, 0x30000000, 0xff000000},
+ {0x0260, 0x00000002, 0x000000ff},
+ {0x0264, 0x00000057, 0x000000ff},
+ {0x0268, 0x00575700, 0x00ffff00},
+ {0x0278, 0xff000000, 0xff000000},
+ {0x0280, 0x00500050, 0x00ff00ff},
+ {0x0284, 0x00001f15, 0x0000ffff},
+ {0x028c, 0x00006f00, 0x0000ff00},
+ {0x0294, 0x00000000, 0xffffff00},
+ {0x0298, 0x00002640, 0xff00ffff},
+ {0x029c, 0x00000003, 0x000000ff},
+ {0x02a4, 0x00000f13, 0x0000ffff},
+ {0x02a8, 0x0001b600, 0x00ffff00},
+ {0x0380, 0x00000030, 0x000000ff},
+ {0x03c0, 0x00000200, 0x0000ff00},
+ {0x03cc, 0x00000018, 0x000000ff},
+ {0x03cc, 0x00000000, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_phyb_10p3125g_comlane[] = {
+ {0x0a00, 0x00000800, 0x0000ff00},
+ {0x0a84, 0x00000000, 0x000000ff},
+ {0x0a8c, 0x00130000, 0x00ff0000},
+ {0x0a90, 0x77a00000, 0xffff0000},
+ {0x0a94, 0x00007777, 0x0000ffff},
+ {0x0b08, 0x000f0000, 0xffff0000},
+ {0x0b0c, 0x000f0000, 0x00ffffff},
+ {0x0b10, 0xbe000000, 0xff000000},
+ {0x0b14, 0x000000ff, 0x000000ff},
+ {0x0b18, 0x00000014, 0x000000ff},
+ {0x0b5c, 0x981b0000, 0xffff0000},
+ {0x0b64, 0x00001100, 0x0000ff00},
+ {0x0b78, 0x00000c00, 0x0000ff00},
+ {0x0abc, 0xff000000, 0xff000000},
+ {0x0ac0, 0x0000008b, 0x000000ff},
+};
+
+static struct serdes_cfg cfg_cm_c1_c2[] = {
+ {0x0208, 0x00000000, 0x00000f00},
+ {0x0208, 0x00000000, 0x0000001f},
+ {0x0204, 0x00000000, 0x00040000},
+ {0x0208, 0x000000a0, 0x000000e0},
+};
+
+static void netcp_xgbe_serdes_cmu_init(void __iomem *serdes_regs)
+{
+ int i;
+
+ /* cmu0 setup */
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_1p25g_156p25mhz_cmu0); i++) {
+ reg_rmw(serdes_regs + cfg_phyb_1p25g_156p25mhz_cmu0[i].ofs,
+ cfg_phyb_1p25g_156p25mhz_cmu0[i].val,
+ cfg_phyb_1p25g_156p25mhz_cmu0[i].mask);
+ }
+
+ /* cmu1 setup */
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_156p25mhz_cmu1); i++) {
+ reg_rmw(serdes_regs + cfg_phyb_10p3125g_156p25mhz_cmu1[i].ofs,
+ cfg_phyb_10p3125g_156p25mhz_cmu1[i].val,
+ cfg_phyb_10p3125g_156p25mhz_cmu1[i].mask);
+ }
+}
+
+/* lane is 0 based */
+static void netcp_xgbe_serdes_lane_config(
+ void __iomem *serdes_regs, int lane)
+{
+ int i;
+
+ /* lane setup */
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_16bit_lane); i++) {
+ reg_rmw(serdes_regs +
+ cfg_phyb_10p3125g_16bit_lane[i].ofs +
+ (0x200 * lane),
+ cfg_phyb_10p3125g_16bit_lane[i].val,
+ cfg_phyb_10p3125g_16bit_lane[i].mask);
+ }
+
+ /* disable auto negotiation*/
+ reg_rmw(serdes_regs + (0x200 * lane) + 0x0380,
+ 0x00000000, 0x00000010);
+
+ /* disable link training */
+ reg_rmw(serdes_regs + (0x200 * lane) + 0x03c0,
+ 0x00000000, 0x00000200);
+}
+
+static void netcp_xgbe_serdes_com_enable(void __iomem *serdes_regs)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_phyb_10p3125g_comlane); i++) {
+ reg_rmw(serdes_regs + cfg_phyb_10p3125g_comlane[i].ofs,
+ cfg_phyb_10p3125g_comlane[i].val,
+ cfg_phyb_10p3125g_comlane[i].mask);
+ }
+}
+
+static void netcp_xgbe_serdes_lane_enable(
+ void __iomem *serdes_regs, int lane)
+{
+ /* Set Lane Control Rate */
+ writel(0xe0e9e038, serdes_regs + 0x1fe0 + (4 * lane));
+}
+
+static void netcp_xgbe_serdes_phyb_rst_clr(void __iomem *serdes_regs)
+{
+ reg_rmw(serdes_regs + 0x0a00, 0x0000001f, 0x000000ff);
+}
+
+static void netcp_xgbe_serdes_pll_disable(void __iomem *serdes_regs)
+{
+ writel(0x88000000, serdes_regs + 0x1ff4);
+}
+
+static void netcp_xgbe_serdes_pll_enable(void __iomem *serdes_regs)
+{
+ netcp_xgbe_serdes_phyb_rst_clr(serdes_regs);
+ writel(0xee000000, serdes_regs + 0x1ff4);
+}
+
+static int netcp_xgbe_wait_pll_locked(void __iomem *sw_regs)
+{
+ unsigned long timeout;
+ int ret = 0;
+ u32 val_1, val_0;
+
+ timeout = jiffies + msecs_to_jiffies(500);
+ do {
+ val_0 = (readl(sw_regs + XGBE_SGMII_1_OFFSET) & BIT(4));
+ val_1 = (readl(sw_regs + XGBE_SGMII_2_OFFSET) & BIT(4));
+
+ if (val_1 && val_0)
+ return 0;
+
+ if (time_after(jiffies, timeout)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
+ cpu_relax();
+ } while (true);
+
+ pr_err("XGBE serdes not locked: time out.\n");
+ return ret;
+}
+
+static void netcp_xgbe_serdes_enable_xgmii_port(void __iomem *sw_regs)
+{
+ writel(0x03, sw_regs + XGBE_CTRL_OFFSET);
+}
+
+static u32 netcp_xgbe_serdes_read_tbus_val(void __iomem *serdes_regs)
+{
+ u32 tmp;
+
+ if (PHY_A(serdes_regs)) {
+ tmp = (readl(serdes_regs + 0x0ec) >> 24) & 0x0ff;
+ tmp |= ((readl(serdes_regs + 0x0fc) >> 16) & 0x00f00);
+ } else {
+ tmp = (readl(serdes_regs + 0x0f8) >> 16) & 0x0fff;
+ }
+
+ return tmp;
+}
+
+static void netcp_xgbe_serdes_write_tbus_addr(void __iomem *serdes_regs,
+ int select, int ofs)
+{
+ if (PHY_A(serdes_regs)) {
+ reg_rmw(serdes_regs + 0x0008, ((select << 5) + ofs) << 24,
+ ~0x00ffffff);
+ return;
+ }
+
+ /* For 2 lane Phy-B, lane0 is actually lane1 */
+ switch (select) {
+ case 1:
+ select = 2;
+ break;
+ case 2:
+ select = 3;
+ break;
+ default:
+ return;
+ }
+
+ reg_rmw(serdes_regs + 0x00fc, ((select << 8) + ofs) << 16, ~0xf800ffff);
+}
+
+static u32 netcp_xgbe_serdes_read_select_tbus(void __iomem *serdes_regs,
+ int select, int ofs)
+{
+ /* Set tbus address */
+ netcp_xgbe_serdes_write_tbus_addr(serdes_regs, select, ofs);
+ /* Get TBUS Value */
+ return netcp_xgbe_serdes_read_tbus_val(serdes_regs);
+}
+
+static void netcp_xgbe_serdes_reset_cdr(void __iomem *serdes_regs,
+ void __iomem *sig_detect_reg, int lane)
+{
+ u32 tmp, dlpf, tbus;
+
+ /*Get the DLPF values */
+ tmp = netcp_xgbe_serdes_read_select_tbus(
+ serdes_regs, lane + 1, 5);
+
+ dlpf = tmp >> 2;
+
+ if (dlpf < 400 || dlpf > 700) {
+ reg_rmw(sig_detect_reg, VAL_SH(2, 1), MASK_WID_SH(2, 1));
+ mdelay(1);
+ reg_rmw(sig_detect_reg, VAL_SH(0, 1), MASK_WID_SH(2, 1));
+ } else {
+ tbus = netcp_xgbe_serdes_read_select_tbus(serdes_regs, lane +
+ 1, 0xe);
+
+ pr_debug("XGBE: CDR centered, DLPF: %4d,%d,%d.\n",
+ tmp >> 2, tmp & 3, (tbus >> 2) & 3);
+ }
+}
+
+/* Call every 100 ms */
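+/* Per-lane link state machine used below: state 0 waits for signal and block
+ * lock, resetting the CDR while there is no lock and forcing signal detect on
+ * once the link is good; state 1 is link up, and losing block lock marks the
+ * lane down and moves to state 2; in state 2 a returning block lock is
+ * treated as noise (back to state 1), otherwise the CDR is reset and the lane
+ * starts over in state 0.
+ */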
+static int netcp_xgbe_check_link_status(void __iomem *serdes_regs,
+ void __iomem *sw_regs, u32 lanes,
+ u32 *current_state, u32 *lane_down)
+{
+ void __iomem *pcsr_base = sw_regs + 0x0600;
+ void __iomem *sig_detect_reg;
+ u32 pcsr_rx_stat, blk_lock, blk_errs;
+ int loss, i, status = 1;
+
+ for (i = 0; i < lanes; i++) {
+ /* Get the Loss bit */
+ loss = readl(serdes_regs + 0x1fc0 + 0x20 + (i * 0x04)) & 0x1;
+
+ /* Get Block Errors and Block Lock bits */
+ pcsr_rx_stat = readl(pcsr_base + 0x0c + (i * 0x80));
+ blk_lock = (pcsr_rx_stat >> 30) & 0x1;
+ blk_errs = (pcsr_rx_stat >> 16) & 0x0ff;
+
+ /* Get Signal Detect Overlay Address */
+ sig_detect_reg = serdes_regs + (i * 0x200) + 0x200 + 0x04;
+
+ /* If Block errors maxed out, attempt recovery! */
+ if (blk_errs == 0x0ff)
+ blk_lock = 0;
+
+ switch (current_state[i]) {
+ case 0:
+ /* if good link lock the signal detect ON! */
+ if (!loss && blk_lock) {
+ pr_debug("XGBE PCSR Linked Lane: %d\n", i);
+ reg_rmw(sig_detect_reg, VAL_SH(3, 1),
+ MASK_WID_SH(2, 1));
+ current_state[i] = 1;
+ } else if (!blk_lock) {
+ /* if no lock, then reset CDR */
+ pr_debug("XGBE PCSR Recover Lane: %d\n", i);
+ netcp_xgbe_serdes_reset_cdr(serdes_regs,
+ sig_detect_reg, i);
+ }
+ break;
+
+ case 1:
+ if (!blk_lock) {
+ /* Link Lost? */
+ lane_down[i] = 1;
+ current_state[i] = 2;
+ }
+ break;
+
+ case 2:
+ if (blk_lock)
+ /* Nope just noise */
+ current_state[i] = 1;
+ else {
+ /* Lost the block lock, reset CDR if it is
+ * not centered and go back to sync state
+ */
+ netcp_xgbe_serdes_reset_cdr(serdes_regs,
+ sig_detect_reg, i);
+ current_state[i] = 0;
+ }
+ break;
+
+ default:
+ pr_err("XGBE: unknown current_state[%d] %d\n",
+ i, current_state[i]);
+ break;
+ }
+
+ if (blk_errs > 0) {
+ /* Reset the Error counts! */
+ reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x19, 0),
+ MASK_WID_SH(8, 0));
+
+ reg_rmw(pcsr_base + 0x08 + (i * 0x80), VAL_SH(0x00, 0),
+ MASK_WID_SH(8, 0));
+ }
+
+ status &= (current_state[i] == 1);
+ }
+
+ return status;
+}
+
+static int netcp_xgbe_serdes_check_lane(void __iomem *serdes_regs,
+ void __iomem *sw_regs)
+{
+ u32 current_state[2] = {0, 0};
+ int retries = 0, link_up;
+ u32 lane_down[2];
+
+ do {
+ lane_down[0] = 0;
+ lane_down[1] = 0;
+
+ link_up = netcp_xgbe_check_link_status(serdes_regs, sw_regs, 2,
+ current_state,
+ lane_down);
+
+ /* if we did not get link up then wait 100ms before calling
+ * it again
+ */
+ if (link_up)
+ break;
+
+ if (lane_down[0])
+ pr_debug("XGBE: detected link down on lane 0\n");
+
+ if (lane_down[1])
+ pr_debug("XGBE: detected link down on lane 1\n");
+
+ if (++retries > 1) {
+ pr_debug("XGBE: timeout waiting for serdes link up\n");
+ return -ETIMEDOUT;
+ }
+ mdelay(100);
+ } while (!link_up);
+
+ pr_debug("XGBE: PCSR link is up\n");
+ return 0;
+}
+
+static void netcp_xgbe_serdes_setup_cm_c1_c2(void __iomem *serdes_regs,
+ int lane, int cm, int c1, int c2)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_cm_c1_c2); i++) {
+ reg_rmw(serdes_regs + cfg_cm_c1_c2[i].ofs + (0x200 * lane),
+ cfg_cm_c1_c2[i].val,
+ cfg_cm_c1_c2[i].mask);
+ }
+}
+
+static void netcp_xgbe_reset_serdes(void __iomem *serdes_regs)
+{
+ /* Toggle the POR_EN bit in CONFIG.CPU_CTRL */
+ /* enable POR_EN bit */
+ reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, POR_EN, POR_EN);
+ usleep_range(10, 100);
+
+ /* disable POR_EN bit */
+ reg_rmw(serdes_regs + PCSR_CPU_CTRL_OFFSET, 0, POR_EN);
+ usleep_range(10, 100);
+}
+
+static int netcp_xgbe_serdes_config(void __iomem *serdes_regs,
+ void __iomem *sw_regs)
+{
+ int ret, i;
+
+ netcp_xgbe_serdes_pll_disable(serdes_regs);
+ netcp_xgbe_serdes_cmu_init(serdes_regs);
+
+ for (i = 0; i < 2; i++)
+ netcp_xgbe_serdes_lane_config(serdes_regs, i);
+
+ netcp_xgbe_serdes_com_enable(serdes_regs);
+ /* This is EVM + RTM-BOC specific */
+ for (i = 0; i < 2; i++)
+ netcp_xgbe_serdes_setup_cm_c1_c2(serdes_regs, i, 0, 0, 5);
+
+ netcp_xgbe_serdes_pll_enable(serdes_regs);
+ for (i = 0; i < 2; i++)
+ netcp_xgbe_serdes_lane_enable(serdes_regs, i);
+
+ /* SB PLL Status Poll */
+ ret = netcp_xgbe_wait_pll_locked(sw_regs);
+ if (ret)
+ return ret;
+
+ netcp_xgbe_serdes_enable_xgmii_port(sw_regs);
+ netcp_xgbe_serdes_check_lane(serdes_regs, sw_regs);
+ return ret;
+}
+
+int netcp_xgbe_serdes_init(void __iomem *serdes_regs, void __iomem *xgbe_regs)
+{
+ u32 val;
+
+ /* read COMLANE bits 4:0 */
+ val = readl(serdes_regs + 0xa00);
+ if (val & 0x1f) {
+ pr_debug("XGBE: serdes already in operation - reset\n");
+ netcp_xgbe_reset_serdes(serdes_regs);
+ }
+ return netcp_xgbe_serdes_config(serdes_regs, xgbe_regs);
+}
diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c
new file mode 100644
index 000000000..b3da76efa
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.c
@@ -0,0 +1,3277 @@
+/*******************************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.c
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1998 James Banks
+ * (C) 1999-2001 Torben Mathiasen
+ * (C) 2002 Samuel Chessman
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ ** Useful (if not required) reading:
+ *
+ * Texas Instruments, ThunderLAN Programmer's Guide,
+ * TI Literature Number SPWU013A
+ * available in PDF format from www.ti.com
+ * Level One, LXT901 and LXT970 Data Sheets
+ * available in PDF format from www.level1.com
+ * National Semiconductor, DP83840A Data Sheet
+ * available in PDF format from www.national.com
+ * Microchip Technology, 24C01A/02A/04A Data Sheet
+ * available in PDF format from www.microchip.com
+ *
+ ******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/hardirq.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/eisa.h>
+#include <linux/pci.h>
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+
+#include "tlan.h"
+
+
+/* For removing EISA devices */
+static struct net_device *tlan_eisa_devices;
+
+static int tlan_devices_installed;
+
+/* Set speed, duplex and aui settings */
+static int aui[MAX_TLAN_BOARDS];
+static int duplex[MAX_TLAN_BOARDS];
+static int speed[MAX_TLAN_BOARDS];
+static int boards_found;
+module_param_array(aui, int, NULL, 0);
+module_param_array(duplex, int, NULL, 0);
+module_param_array(speed, int, NULL, 0);
+MODULE_PARM_DESC(aui, "ThunderLAN use AUI port(s) (0-1)");
+MODULE_PARM_DESC(duplex,
+ "ThunderLAN duplex setting(s) (0-default, 1-half, 2-full)");
+MODULE_PARM_DESC(speed, "ThunderLAN port speed setting(s) (0,10,100)");
+
+MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
+MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
+MODULE_LICENSE("GPL");
+
+/* Turn on debugging.
+ * See Documentation/networking/device_drivers/ethernet/ti/tlan.rst for details
+ */
+static int debug;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "ThunderLAN debug mask");
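+
+/* The mask is built from the TLAN_DEBUG_* bits defined in tlan.h; for
+ * example, loading the module with debug=0x10 should enable the probe-time
+ * messages (TLAN_DEBUG_PROBE), the same value tlan_eisa_probe() also checks
+ * for explicitly below.
+ */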
+
+static const char tlan_signature[] = "TLAN";
+static const char tlan_banner[] = "ThunderLAN driver v1.17\n";
+static int tlan_have_pci;
+static int tlan_have_eisa;
+
+static const char * const media[] = {
+ "10BaseT-HD", "10BaseT-FD", "100baseTx-HD",
+ "100BaseTx-FD", "100BaseT4", NULL
+};
+
+static struct board {
+ const char *device_label;
+ u32 flags;
+ u16 addr_ofs;
+} board_info[] = {
+ { "Compaq Netelligent 10 T PCI UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10/100 TX PCI UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Integrated NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/P",
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/P", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent Integrated 10/100 TX UTP",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent Dual 10/100 TX PCI UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq Netelligent 10/100 TX Embedded UTP",
+ TLAN_ADAPTER_NONE, 0x83 },
+ { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
+ { "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
+ TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+ { "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
+ TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+ { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
+ { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED | /* EISA card */
+ TLAN_ADAPTER_UNMANAGED_PHY | TLAN_ADAPTER_BIT_RATE_PHY, 0x83 },
+ { "Compaq NetFlex-3/E",
+ TLAN_ADAPTER_ACTIVITY_LED, 0x83 }, /* EISA card */
+};
+
+static const struct pci_device_id tlan_pci_tbl[] = {
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL10,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_THUNDER,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETFLEX3B,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100PI,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100D,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_NETEL100I,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2183,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2325,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
+ { PCI_VENDOR_ID_OLICOM, PCI_DEVICE_ID_OLICOM_OC2326,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
+ { PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_NETELLIGENT_10_T2,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
+ { 0,}
+};
+MODULE_DEVICE_TABLE(pci, tlan_pci_tbl);
+
+static void tlan_eisa_probe(void);
+static void tlan_eisa_cleanup(void);
+static int tlan_init(struct net_device *);
+static int tlan_open(struct net_device *dev);
+static netdev_tx_t tlan_start_tx(struct sk_buff *, struct net_device *);
+static irqreturn_t tlan_handle_interrupt(int, void *);
+static int tlan_close(struct net_device *);
+static struct net_device_stats *tlan_get_stats(struct net_device *);
+static void tlan_set_multicast_list(struct net_device *);
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr,
+ int irq, int rev, const struct pci_device_id *ent);
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue);
+static void tlan_tx_timeout_work(struct work_struct *work);
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent);
+
+static u32 tlan_handle_tx_eof(struct net_device *, u16);
+static u32 tlan_handle_stat_overflow(struct net_device *, u16);
+static u32 tlan_handle_rx_eof(struct net_device *, u16);
+static u32 tlan_handle_dummy(struct net_device *, u16);
+static u32 tlan_handle_tx_eoc(struct net_device *, u16);
+static u32 tlan_handle_status_check(struct net_device *, u16);
+static u32 tlan_handle_rx_eoc(struct net_device *, u16);
+
+static void tlan_timer(struct timer_list *t);
+static void tlan_phy_monitor(struct timer_list *t);
+
+static void tlan_reset_lists(struct net_device *);
+static void tlan_free_lists(struct net_device *);
+static void tlan_print_dio(u16);
+static void tlan_print_list(struct tlan_list *, char *, int);
+static void tlan_read_and_clear_stats(struct net_device *, int);
+static void tlan_reset_adapter(struct net_device *);
+static void tlan_finish_reset(struct net_device *);
+static void tlan_set_mac(struct net_device *, int areg, const char *mac);
+
+static void __tlan_phy_print(struct net_device *);
+static void tlan_phy_print(struct net_device *);
+static void tlan_phy_detect(struct net_device *);
+static void tlan_phy_power_down(struct net_device *);
+static void tlan_phy_power_up(struct net_device *);
+static void tlan_phy_reset(struct net_device *);
+static void tlan_phy_start_link(struct net_device *);
+static void tlan_phy_finish_auto_neg(struct net_device *);
+
+/*
+ static int tlan_phy_nop(struct net_device *);
+ static int tlan_phy_internal_check(struct net_device *);
+ static int tlan_phy_internal_service(struct net_device *);
+ static int tlan_phy_dp83840a_check(struct net_device *);
+*/
+
+static bool __tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_read_reg(struct net_device *, u16, u16, u16 *);
+static void tlan_mii_send_data(u16, u32, unsigned);
+static void tlan_mii_sync(u16);
+static void __tlan_mii_write_reg(struct net_device *, u16, u16, u16);
+static void tlan_mii_write_reg(struct net_device *, u16, u16, u16);
+
+static void tlan_ee_send_start(u16);
+static int tlan_ee_send_byte(u16, u8, int);
+static void tlan_ee_receive_byte(u16, u8 *, int);
+static int tlan_ee_read_byte(struct net_device *, u8, u8 *);
+
+
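+/* The TLAN list buffer address fields are only 32 bits wide, so the skb
+ * pointer is stashed by splitting it across the buffer[9] (low half) and
+ * buffer[8] (high half) entries; tlan_get_skb() reassembles it below with a
+ * double 16-bit shift, presumably to avoid an undefined 32-bit shift on
+ * 32-bit builds.
+ */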
+static inline void
+tlan_store_skb(struct tlan_list *tag, struct sk_buff *skb)
+{
+ unsigned long addr = (unsigned long)skb;
+ tag->buffer[9].address = addr;
+ tag->buffer[8].address = upper_32_bits(addr);
+}
+
+static inline struct sk_buff *
+tlan_get_skb(const struct tlan_list *tag)
+{
+ unsigned long addr;
+
+ addr = tag->buffer[9].address;
+ addr |= ((unsigned long) tag->buffer[8].address << 16) << 16;
+ return (struct sk_buff *) addr;
+}
+
+static u32
+(*tlan_int_vector[TLAN_INT_NUMBER_OF_INTS])(struct net_device *, u16) = {
+ NULL,
+ tlan_handle_tx_eof,
+ tlan_handle_stat_overflow,
+ tlan_handle_rx_eof,
+ tlan_handle_dummy,
+ tlan_handle_tx_eoc,
+ tlan_handle_status_check,
+ tlan_handle_rx_eoc
+};
+
+static void
+tlan_set_timer(struct net_device *dev, u32 ticks, u32 type)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function != NULL &&
+ priv->timer_type != TLAN_TIMER_ACTIVITY) {
+ spin_unlock_irqrestore(&priv->lock, flags);
+ return;
+ }
+ priv->timer.function = tlan_timer;
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ priv->timer_set_at = jiffies;
+ priv->timer_type = type;
+ mod_timer(&priv->timer, jiffies + ticks);
+
+}
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver primary functions
+
+These functions are more or less common to all Linux network drivers.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+
+/***************************************************************
+ * tlan_remove_one
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * None
+ *
+ * Goes through the TLanDevices list and frees the device
+ * structs and memory associated with each device (lists
+ * and buffers). It also unreserves the IO port regions
+ * associated with this device.
+ *
+ **************************************************************/
+
+
+static void tlan_remove_one(struct pci_dev *pdev)
+{
+ struct net_device *dev = pci_get_drvdata(pdev);
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ if (priv->dma_storage) {
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
+ }
+
+#ifdef CONFIG_PCI
+ pci_release_regions(pdev);
+#endif
+
+ cancel_work_sync(&priv->tlan_tqueue);
+ free_netdev(dev);
+}
+
+static void tlan_start(struct net_device *dev)
+{
+ tlan_reset_lists(dev);
+ /* NOTE: It might not be necessary to read the stats before a
+ reset if you don't care what the values are.
+ */
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_wake_queue(dev);
+}
+
+static void tlan_stop(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ del_timer_sync(&priv->media_timer);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+ /* Reset and power down phy */
+ tlan_reset_adapter(dev);
+ if (priv->timer.function != NULL) {
+ del_timer_sync(&priv->timer);
+ priv->timer.function = NULL;
+ }
+}
+
+static int __maybe_unused tlan_suspend(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+
+ if (netif_running(dev))
+ tlan_stop(dev);
+
+ netif_device_detach(dev);
+
+ return 0;
+}
+
+static int __maybe_unused tlan_resume(struct device *dev_d)
+{
+ struct net_device *dev = dev_get_drvdata(dev_d);
+ netif_device_attach(dev);
+
+ if (netif_running(dev))
+ tlan_start(dev);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(tlan_pm_ops, tlan_suspend, tlan_resume);
+
+static struct pci_driver tlan_driver = {
+ .name = "tlan",
+ .id_table = tlan_pci_tbl,
+ .probe = tlan_init_one,
+ .remove = tlan_remove_one,
+ .driver.pm = &tlan_pm_ops,
+};
+
+static int __init tlan_probe(void)
+{
+ int rc = -ENODEV;
+
+ pr_info("%s", tlan_banner);
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting PCI Probe....\n");
+
+ /* Use new style PCI probing. Now the kernel will
+ do most of this for us */
+ rc = pci_register_driver(&tlan_driver);
+
+ if (rc != 0) {
+ pr_err("Could not register pci driver\n");
+ goto err_out_pci_free;
+ }
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "Starting EISA Probe....\n");
+ tlan_eisa_probe();
+
+ pr_info("%d device%s installed, PCI: %d EISA: %d\n",
+ tlan_devices_installed, tlan_devices_installed == 1 ? "" : "s",
+ tlan_have_pci, tlan_have_eisa);
+
+ if (tlan_devices_installed == 0) {
+ rc = -ENODEV;
+ goto err_out_pci_unreg;
+ }
+ return 0;
+
+err_out_pci_unreg:
+ pci_unregister_driver(&tlan_driver);
+err_out_pci_free:
+ return rc;
+}
+
+
+static int tlan_init_one(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ return tlan_probe1(pdev, -1, -1, 0, ent);
+}
+
+
+/*
+***************************************************************
+* tlan_probe1
+*
+* Returns:
+* 0 on success, error code on error
+* Parms:
+* pdev PCI device struct (NULL for EISA adapters)
+* ioaddr IO base address (EISA adapters only)
+* irq IRQ number (EISA adapters only)
+* rev adapter revision
+* ent PCI device id table entry (NULL for EISA adapters)
+*
+* The name is lower case to fit in with all the rest of
+* the netcard_probe names. This function looks for
+* another TLan based adapter, setting it up with the
+* allocated device struct if one is found.
+* tlan_probe has been ported to the new net API and
+* now allocates its own device structure. This function
+* is also used by modules.
+*
+**************************************************************/
+
+static int tlan_probe1(struct pci_dev *pdev, long ioaddr, int irq, int rev,
+ const struct pci_device_id *ent)
+{
+
+ struct net_device *dev;
+ struct tlan_priv *priv;
+ u16 device_id;
+ int reg, rc = -ENODEV;
+
+#ifdef CONFIG_PCI
+ if (pdev) {
+ rc = pci_enable_device(pdev);
+ if (rc)
+ return rc;
+
+ rc = pci_request_regions(pdev, tlan_signature);
+ if (rc) {
+ pr_err("Could not reserve IO regions\n");
+ goto err_out;
+ }
+ }
+#endif /* CONFIG_PCI */
+
+ dev = alloc_etherdev(sizeof(struct tlan_priv));
+ if (dev == NULL) {
+ rc = -ENOMEM;
+ goto err_out_regions;
+ }
+ SET_NETDEV_DEV(dev, &pdev->dev);
+
+ priv = netdev_priv(dev);
+
+ priv->pci_dev = pdev;
+ priv->dev = dev;
+
+ /* Is this a PCI device? */
+ if (pdev) {
+ u32 pci_io_base = 0;
+
+ priv->adapter = &board_info[ent->driver_data];
+
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc) {
+ pr_err("No suitable PCI mapping available\n");
+ goto err_out_free_dev;
+ }
+
+ for (reg = 0; reg <= 5; reg++) {
+ if (pci_resource_flags(pdev, reg) & IORESOURCE_IO) {
+ pci_io_base = pci_resource_start(pdev, reg);
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "IO mapping is available at %x.\n",
+ pci_io_base);
+ break;
+ }
+ }
+ if (!pci_io_base) {
+ pr_err("No IO mappings available\n");
+ rc = -EIO;
+ goto err_out_free_dev;
+ }
+
+ dev->base_addr = pci_io_base;
+ dev->irq = pdev->irq;
+ priv->adapter_rev = pdev->revision;
+ pci_set_master(pdev);
+ pci_set_drvdata(pdev, dev);
+
+ } else { /* EISA card */
+ /* This is a hack. We need to know which board structure
+ * is suited for this adapter */
+ device_id = inw(ioaddr + EISA_ID2);
+ if (device_id == 0x20F1) {
+ priv->adapter = &board_info[13]; /* NetFlex-3/E */
+ priv->adapter_rev = 23; /* TLAN 2.3 */
+ } else {
+ priv->adapter = &board_info[14];
+ priv->adapter_rev = 10; /* TLAN 1.0 */
+ }
+ dev->base_addr = ioaddr;
+ dev->irq = irq;
+ }
+
+ /* Kernel parameters */
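+ /* When dev->mem_start is non-zero it encodes forced settings:
+ * bit 0 = aui, bits 2:1 = duplex (both bits set means driver
+ * default), bits 4:3 = speed (both bits set means default).
+ * For example mem_start = 0x12 requests half duplex (bits 2:1
+ * = 01) at 100 Mbit (bits 4:3 = 10), and dev->mem_end then
+ * supplies the debug mask.
+ */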
+ if (dev->mem_start) {
+ priv->aui = dev->mem_start & 0x01;
+ priv->duplex = ((dev->mem_start & 0x06) == 0x06) ? 0
+ : (dev->mem_start & 0x06) >> 1;
+ priv->speed = ((dev->mem_start & 0x18) == 0x18) ? 0
+ : (dev->mem_start & 0x18) >> 3;
+
+ if (priv->speed == 0x1)
+ priv->speed = TLAN_SPEED_10;
+ else if (priv->speed == 0x2)
+ priv->speed = TLAN_SPEED_100;
+
+ debug = priv->debug = dev->mem_end;
+ } else {
+ priv->aui = aui[boards_found];
+ priv->speed = speed[boards_found];
+ priv->duplex = duplex[boards_found];
+ priv->debug = debug;
+ }
+
+ /* This will be used when we get an adapter error from
+ * within our irq handler */
+ INIT_WORK(&priv->tlan_tqueue, tlan_tx_timeout_work);
+
+ spin_lock_init(&priv->lock);
+
+ rc = tlan_init(dev);
+ if (rc) {
+ pr_err("Could not set up device\n");
+ goto err_out_free_dev;
+ }
+
+ rc = register_netdev(dev);
+ if (rc) {
+ pr_err("Could not register device\n");
+ goto err_out_uninit;
+ }
+
+
+ tlan_devices_installed++;
+ boards_found++;
+
+ /* pdev is NULL if this is an EISA device */
+ if (pdev)
+ tlan_have_pci++;
+ else {
+ priv->next_device = tlan_eisa_devices;
+ tlan_eisa_devices = dev;
+ tlan_have_eisa++;
+ }
+
+ netdev_info(dev, "irq=%2d, io=%04x, %s, Rev. %d\n",
+ (int)dev->irq,
+ (int)dev->base_addr,
+ priv->adapter->device_label,
+ priv->adapter_rev);
+ return 0;
+
+err_out_uninit:
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage, priv->dma_storage_dma);
+err_out_free_dev:
+ free_netdev(dev);
+err_out_regions:
+#ifdef CONFIG_PCI
+ if (pdev)
+ pci_release_regions(pdev);
+err_out:
+#endif
+ if (pdev)
+ pci_disable_device(pdev);
+ return rc;
+}
+
+
+static void tlan_eisa_cleanup(void)
+{
+ struct net_device *dev;
+ struct tlan_priv *priv;
+
+ while (tlan_have_eisa) {
+ dev = tlan_eisa_devices;
+ priv = netdev_priv(dev);
+ if (priv->dma_storage) {
+ dma_free_coherent(&priv->pci_dev->dev, priv->dma_size,
+ priv->dma_storage,
+ priv->dma_storage_dma);
+ }
+ release_region(dev->base_addr, 0x10);
+ unregister_netdev(dev);
+ tlan_eisa_devices = priv->next_device;
+ free_netdev(dev);
+ tlan_have_eisa--;
+ }
+}
+
+
+static void __exit tlan_exit(void)
+{
+ pci_unregister_driver(&tlan_driver);
+
+ if (tlan_have_eisa)
+ tlan_eisa_cleanup();
+
+}
+
+
+/* Module loading/unloading */
+module_init(tlan_probe);
+module_exit(tlan_exit);
+
+
+
+/**************************************************************
+ * tlan_eisa_probe
+ *
+ * Returns: Nothing
+ *
+ * Parms: None
+ *
+ *
+ * This function probes for EISA devices and calls
+ * tlan_probe1 when one is found.
+ *
+ *************************************************************/
+
+static void __init tlan_eisa_probe(void)
+{
+ long ioaddr;
+ int irq;
+ u16 device_id;
+
+ if (!EISA_bus) {
+ TLAN_DBG(TLAN_DEBUG_PROBE, "No EISA bus present\n");
+ return;
+ }
+
+ /* Loop through all slots of the EISA bus */
+ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) {
+
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc80, inw(ioaddr + EISA_ID));
+ TLAN_DBG(TLAN_DEBUG_PROBE, "EISA_ID 0x%4x: 0x%4x\n",
+ (int) ioaddr + 0xc82, inw(ioaddr + EISA_ID2));
+
+
+ TLAN_DBG(TLAN_DEBUG_PROBE,
+ "Probing for EISA adapter at IO: 0x%4x : ",
+ (int) ioaddr);
+ if (request_region(ioaddr, 0x10, tlan_signature) == NULL)
+ goto out;
+
+ if (inw(ioaddr + EISA_ID) != 0x110E) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ device_id = inw(ioaddr + EISA_ID2);
+ if (device_id != 0x20F1 && device_id != 0x40F1) {
+ release_region(ioaddr, 0x10);
+ goto out;
+ }
+
+ /* check if adapter is enabled */
+ if (inb(ioaddr + EISA_CR) != 0x1) {
+ release_region(ioaddr, 0x10);
+ goto out2;
+ }
+
+ if (debug == 0x10)
+ pr_info("Found one\n");
+
+
+ /* Get irq from board */
+ switch (inb(ioaddr + 0xcc0)) {
+ case(0x10):
+ irq = 5;
+ break;
+ case(0x20):
+ irq = 9;
+ break;
+ case(0x40):
+ irq = 10;
+ break;
+ case(0x80):
+ irq = 11;
+ break;
+ default:
+ goto out;
+ }
+
+
+ /* Setup the newly found eisa adapter */
+ tlan_probe1(NULL, ioaddr, irq, 12, NULL);
+ continue;
+
+out:
+ if (debug == 0x10)
+ pr_info("None found\n");
+ continue;
+
+out2:
+ if (debug == 0x10)
+ pr_info("Card found but it is not enabled, skipping\n");
+ continue;
+
+ }
+
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void tlan_poll(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ tlan_handle_interrupt(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
+static const struct net_device_ops tlan_netdev_ops = {
+ .ndo_open = tlan_open,
+ .ndo_stop = tlan_close,
+ .ndo_start_xmit = tlan_start_tx,
+ .ndo_tx_timeout = tlan_tx_timeout,
+ .ndo_get_stats = tlan_get_stats,
+ .ndo_set_rx_mode = tlan_set_multicast_list,
+ .ndo_eth_ioctl = tlan_ioctl,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = tlan_poll,
+#endif
+};
+
+static void tlan_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ strscpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ if (priv->pci_dev)
+ strscpy(info->bus_info, pci_name(priv->pci_dev),
+ sizeof(info->bus_info));
+ else
+ strscpy(info->bus_info, "EISA", sizeof(info->bus_info));
+}
+
+static int tlan_get_eeprom_len(struct net_device *dev)
+{
+ return TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom(struct net_device *dev,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ int i;
+
+ for (i = 0; i < TLAN_EEPROM_SIZE; i++)
+ if (tlan_ee_read_byte(dev, i, &data[i]))
+ return -EIO;
+
+ return 0;
+}
+
+static const struct ethtool_ops tlan_ethtool_ops = {
+ .get_drvinfo = tlan_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_eeprom_len = tlan_get_eeprom_len,
+ .get_eeprom = tlan_get_eeprom,
+};
+
+/***************************************************************
+ * tlan_init
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev The structure of the device to be
+ * init'ed.
+ *
+ * This function completes the initialization of the
+ * device structure and driver. It reserves the IO
+ * addresses, allocates memory for the lists and bounce
+ * buffers, retrieves the MAC address from the eeprom
+ * and assigns the device's methods.
+ *
+ **************************************************************/
+
+static int tlan_init(struct net_device *dev)
+{
+ int dma_size;
+ int err;
+ int i;
+ struct tlan_priv *priv;
+ u8 addr[ETH_ALEN];
+
+ priv = netdev_priv(dev);
+
+ dma_size = (TLAN_NUM_RX_LISTS + TLAN_NUM_TX_LISTS)
+ * (sizeof(struct tlan_list));
+ priv->dma_storage = dma_alloc_coherent(&priv->pci_dev->dev, dma_size,
+ &priv->dma_storage_dma, GFP_KERNEL);
+ priv->dma_size = dma_size;
+
+ if (priv->dma_storage == NULL) {
+ pr_err("Could not allocate lists and buffers for %s\n",
+ dev->name);
+ return -ENOMEM;
+ }
+ priv->rx_list = (struct tlan_list *)
+ ALIGN((unsigned long)priv->dma_storage, 8);
+ priv->rx_list_dma = ALIGN(priv->dma_storage_dma, 8);
+ priv->tx_list = priv->rx_list + TLAN_NUM_RX_LISTS;
+ priv->tx_list_dma =
+ priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
+
+ err = 0;
+ for (i = 0; i < ETH_ALEN; i++)
+ err |= tlan_ee_read_byte(dev,
+ (u8) priv->adapter->addr_ofs + i,
+ addr + i);
+ if (err) {
+ pr_err("%s: Error reading MAC from eeprom: %d\n",
+ dev->name, err);
+ }
+ /* Olicom OC-2325/OC-2326 have the address byte-swapped */
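+ /* e.g. (illustrative values) EEPROM bytes 34:12:78:56:bc:9a become
+ * the MAC address 12:34:56:78:9a:bc after swapping each pair
+ */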
+ if (priv->adapter->addr_ofs == 0xf8) {
+ for (i = 0; i < ETH_ALEN; i += 2) {
+ char tmp = addr[i];
+ addr[i] = addr[i + 1];
+ addr[i + 1] = tmp;
+ }
+ }
+ eth_hw_addr_set(dev, addr);
+
+ netif_carrier_off(dev);
+
+ /* Device methods */
+ dev->netdev_ops = &tlan_netdev_ops;
+ dev->ethtool_ops = &tlan_ethtool_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
+
+ return 0;
+
+}
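+
+/* Note (editorial, not part of the driver): the adapter is handed the
+ * physical addresses of the list structures, so tlan_init() above rounds
+ * both the CPU pointer and the DMA handle up to the same 8-byte boundary
+ * before carving the area into TLAN_NUM_RX_LISTS Rx lists followed by
+ * TLAN_NUM_TX_LISTS Tx lists.
+ */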
+
+
+
+
+/***************************************************************
+ * tlan_open
+ *
+ * Returns:
+ * 0 on success, error code otherwise.
+ * Parms:
+ * dev Structure of device to be opened.
+ *
+ * This routine puts the driver and TLAN adapter in a
+ * state where it is ready to send and receive packets.
+ * It allocates the IRQ, resets and brings the adapter
+ * out of reset, and allows interrupts. It also delays
+ * the startup for autonegotiation or sends a Rx GO
+ * command to the adapter, as appropriate.
+ *
+ **************************************************************/
+
+static int tlan_open(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int err;
+
+ priv->tlan_rev = tlan_dio_read8(dev->base_addr, TLAN_DEF_REVISION);
+ err = request_irq(dev->irq, tlan_handle_interrupt, IRQF_SHARED,
+ dev->name, dev);
+
+ if (err) {
+ netdev_err(dev, "Cannot open because IRQ %d is already in use\n",
+ dev->irq);
+ return err;
+ }
+
+ timer_setup(&priv->timer, NULL, 0);
+ timer_setup(&priv->media_timer, tlan_phy_monitor, 0);
+
+ tlan_start(dev);
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Opened. TLAN Chip Rev: %x\n",
+ dev->name, priv->tlan_rev);
+
+ return 0;
+
+}
+
+
+
+/**************************************************************
+ * tlan_ioctl
+ *
+ * Returns:
+ * 0 on success, error code otherwise
+ * Params:
+ * dev structure of device to receive ioctl.
+ *
+ * rq ifreq structure to hold userspace data.
+ *
+ * cmd ioctl command.
+ *
+ *
+ *************************************************************/
+
+static int tlan_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct mii_ioctl_data *data = if_mii(rq);
+ u32 phy = priv->phy[priv->phy_num];
+
+ if (!priv->phy_online)
+ return -EAGAIN;
+
+ switch (cmd) {
+ case SIOCGMIIPHY: /* get address of MII PHY in use. */
+ data->phy_id = phy;
+ fallthrough;
+
+
+ case SIOCGMIIREG: /* read MII PHY register. */
+ tlan_mii_read_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, &data->val_out);
+ return 0;
+
+
+ case SIOCSMIIREG: /* write MII PHY register. */
+ tlan_mii_write_reg(dev, data->phy_id & 0x1f,
+ data->reg_num & 0x1f, data->val_in);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+
+/***************************************************************
+ * tlan_tx_timeout
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * dev structure of device which timed out
+ * during transmit.
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout(struct net_device *dev, unsigned int txqueue)
+{
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Transmit timed out.\n", dev->name);
+
+	/* Ok, so we timed out; let's see what we can do about it... */
+ tlan_free_lists(dev);
+ tlan_reset_lists(dev);
+ tlan_read_and_clear_stats(dev, TLAN_IGNORE);
+ tlan_reset_adapter(dev);
+ netif_trans_update(dev); /* prevent tx timeout */
+ netif_wake_queue(dev);
+
+}
+
+
+/***************************************************************
+ * tlan_tx_timeout_work
+ *
+ * Returns: nothing
+ *
+ * Params:
+ * work work item of device which timed out
+ *
+ **************************************************************/
+
+static void tlan_tx_timeout_work(struct work_struct *work)
+{
+ struct tlan_priv *priv =
+ container_of(work, struct tlan_priv, tlan_tqueue);
+
+ tlan_tx_timeout(priv->dev, UINT_MAX);
+}
+
+
+
+/***************************************************************
+ * tlan_start_tx
+ *
+ * Returns:
+ * 0 on success, non-zero on failure.
+ * Parms:
+ * skb A pointer to the sk_buff containing the
+ * frame to be sent.
+ * dev The device to send the data on.
+ *
+ * This function adds a frame to the Tx list to be sent
+ * ASAP. First it verifies that the adapter is ready and
+ * there is room in the queue. Then it sets up the next
+ * available list and maps the frame's data into the
+ * corresponding buffer. If the adapter Tx channel is idle,
+ * it gives the adapter a Tx Go command on the list, otherwise
+ * it sets the forward address of the previous list to point
+ * to this one. The sk_buff is freed later, once the adapter
+ * has finished with it (see tlan_handle_tx_eof).
+ *
+ **************************************************************/
+
+static netdev_tx_t tlan_start_tx(struct sk_buff *skb, struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ dma_addr_t tail_list_phys;
+ struct tlan_list *tail_list;
+ unsigned long flags;
+ unsigned int txlen;
+
+ if (!priv->phy_online) {
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s PHY is not ready\n",
+ dev->name);
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ if (skb_padto(skb, TLAN_MIN_FRAME_SIZE))
+ return NETDEV_TX_OK;
+ txlen = max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE);
+
+ tail_list = priv->tx_list + priv->tx_tail;
+ tail_list_phys =
+ priv->tx_list_dma + sizeof(struct tlan_list)*priv->tx_tail;
+
+ if (tail_list->c_stat != TLAN_CSTAT_UNUSED) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: %s is busy (Head=%d Tail=%d)\n",
+ dev->name, priv->tx_head, priv->tx_tail);
+ netif_stop_queue(dev);
+ priv->tx_busy_count++;
+ return NETDEV_TX_BUSY;
+ }
+
+ tail_list->forward = 0;
+
+ tail_list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
+ skb->data, txlen,
+ DMA_TO_DEVICE);
+ tlan_store_skb(tail_list, skb);
+
+ tail_list->frame_size = (u16) txlen;
+ tail_list->buffer[0].count = TLAN_LAST_BUFFER | (u32) txlen;
+ tail_list->buffer[1].count = 0;
+ tail_list->buffer[1].address = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ tail_list->c_stat = TLAN_CSTAT_READY;
+ if (!priv->tx_in_progress) {
+ priv->tx_in_progress = 1;
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Starting TX on buffer %d\n",
+ priv->tx_tail);
+ outl(tail_list_phys, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO, dev->base_addr + TLAN_HOST_CMD);
+ } else {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Adding buffer %d to TX channel\n",
+ priv->tx_tail);
+ if (priv->tx_tail == 0) {
+ (priv->tx_list + (TLAN_NUM_TX_LISTS - 1))->forward
+ = tail_list_phys;
+ } else {
+ (priv->tx_list + (priv->tx_tail - 1))->forward
+ = tail_list_phys;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ CIRC_INC(priv->tx_tail, TLAN_NUM_TX_LISTS);
+
+ return NETDEV_TX_OK;
+
+}
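+
+/* Illustrative sketch (editorial, not part of the driver): CIRC_INC()
+ * advances a ring index and wraps it back to zero at the list count, so
+ * the Tx and Rx lists are treated as circular buffers.  Assuming that
+ * wrap-around behaviour, an equivalent open-coded form would be:
+ */
+#if 0	/* example only, never compiled */
+	priv->tx_tail = (priv->tx_tail + 1) % TLAN_NUM_TX_LISTS;
+#endif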
+
+
+
+
+/***************************************************************
+ * tlan_handle_interrupt
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * irq The line on which the interrupt
+ * occurred.
+ * dev_id A pointer to the device assigned to
+ * this irq line.
+ *
+ * This function handles an interrupt generated by its
+ * assigned TLAN adapter. The function deactivates
+ * interrupts on its adapter, records the type of
+ * interrupt, executes the appropriate subhandler, and
+ * acknowledges the interrupt to the adapter (thus
+ * re-enabling adapter interrupts).
+ *
+ **************************************************************/
+
+static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 host_int;
+ u16 type;
+
+ spin_lock(&priv->lock);
+
+ host_int = inw(dev->base_addr + TLAN_HOST_INT);
+ type = (host_int & TLAN_HI_IT_MASK) >> 2;
+ if (type) {
+ u32 ack;
+ u32 host_cmd;
+
+ outw(host_int, dev->base_addr + TLAN_HOST_INT);
+ ack = tlan_int_vector[type](dev, host_int);
+
+ if (ack) {
+ host_cmd = TLAN_HC_ACK | ack | (type << 18);
+ outl(host_cmd, dev->base_addr + TLAN_HOST_CMD);
+ }
+ }
+
+ spin_unlock(&priv->lock);
+
+ return IRQ_RETVAL(type);
+}
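+
+/* Illustrative sketch (editorial, not part of the driver): the acknowledge
+ * command written to TLAN_HOST_CMD above is one 32-bit word combining the
+ * TLAN_HC_ACK bit, the number of frames being acknowledged and the
+ * interrupt type shifted into the position the adapter expects.  The
+ * hypothetical helper below only restates that composition.
+ */
+#if 0	/* example only, never compiled */
+static u32 tlan_example_ack_cmd(u32 ack, u16 type)
+{
+	return TLAN_HC_ACK | ack | ((u32)type << 18);
+}
+#endif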
+
+
+
+
+/***************************************************************
+ * tlan_close
+ *
+ * Returns:
+ * An error code.
+ * Parms:
+ * dev The device structure of the device to
+ * close.
+ *
+ * This function shuts down the adapter. It records any
+ * stats, puts the adapter into reset state, deactivates
+ * its timer as needed, and frees the IRQ it is using.
+ *
+ **************************************************************/
+
+static int tlan_close(struct net_device *dev)
+{
+ tlan_stop(dev);
+
+ free_irq(dev->irq, dev);
+ tlan_free_lists(dev);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "Device %s closed.\n", dev->name);
+
+ return 0;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_get_stats
+ *
+ * Returns:
+ * A pointer to the device's statistics structure.
+ * Parms:
+ * dev The device structure to return the
+ * stats for.
+ *
+ * This function updates the devices statistics by reading
+ * the TLAN chip's onboard registers. Then it returns the
+ * address of the statistics structure.
+ *
+ **************************************************************/
+
+static struct net_device_stats *tlan_get_stats(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+
+	/* Should we only read stats if the device is open? */
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: %s EOC count = %d\n", dev->name,
+ priv->rx_eoc_count);
+ TLAN_DBG(TLAN_DEBUG_TX, "TRANSMIT: %s Busy count = %d\n", dev->name,
+ priv->tx_busy_count);
+ if (debug & TLAN_DEBUG_GNRL) {
+ tlan_print_dio(dev->base_addr);
+ tlan_phy_print(dev);
+ }
+ if (debug & TLAN_DEBUG_LIST) {
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++)
+ tlan_print_list(priv->rx_list + i, "RX", i);
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++)
+ tlan_print_list(priv->tx_list + i, "TX", i);
+ }
+
+ return &dev->stats;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_set_multicast_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure to set the
+ * multicast list for.
+ *
+ * This function sets the TLAN adaptor to various receive
+ * modes. If the IFF_PROMISC flag is set, promiscuous
+ * mode is activated. Otherwise, promiscuous mode is
+ * turned off. If the IFF_ALLMULTI flag is set, then
+ * the hash table is set to receive all group addresses.
+ * Otherwise, the first three multicast addresses are
+ * stored in AREG_1-3, and the rest are selected via the
+ * hash table, as necessary.
+ *
+ **************************************************************/
+
+static void tlan_set_multicast_list(struct net_device *dev)
+{
+ struct netdev_hw_addr *ha;
+ u32 hash1 = 0;
+ u32 hash2 = 0;
+ int i;
+ u32 offset;
+ u8 tmp;
+
+ if (dev->flags & IFF_PROMISC) {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp | TLAN_NET_CMD_CAF);
+ } else {
+ tmp = tlan_dio_read8(dev->base_addr, TLAN_NET_CMD);
+ tlan_dio_write8(dev->base_addr,
+ TLAN_NET_CMD, tmp & ~TLAN_NET_CMD_CAF);
+ if (dev->flags & IFF_ALLMULTI) {
+ for (i = 0; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1,
+ 0xffffffff);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2,
+ 0xffffffff);
+ } else {
+ i = 0;
+ netdev_for_each_mc_addr(ha, dev) {
+ if (i < 3) {
+ tlan_set_mac(dev, i + 1,
+ (char *) &ha->addr);
+ } else {
+ offset =
+ tlan_hash_func((u8 *)&ha->addr);
+ if (offset < 32)
+ hash1 |= (1 << offset);
+ else
+ hash2 |= (1 << (offset - 32));
+ }
+ i++;
+ }
+ for ( ; i < 3; i++)
+ tlan_set_mac(dev, i + 1, NULL);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_1, hash1);
+ tlan_dio_write32(dev->base_addr, TLAN_HASH_2, hash2);
+ }
+ }
+
+}
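+
+/* Illustrative sketch (editorial, not part of the driver): tlan_hash_func()
+ * yields a bit offset into a 64-bit multicast hash table, which the code
+ * above splits across the two 32-bit HASH registers.  Setting a single
+ * hash bit therefore amounts to:
+ */
+#if 0	/* example only, never compiled */
+	if (offset < 32)
+		hash1 |= 1u << offset;
+	else
+		hash2 |= 1u << (offset - 32);
+#endif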
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver interrupt vectors and table
+
+Please see chap. 4, "Interrupt Handling" of the "ThunderLAN
+Programmer's Guide" for more information on handling interrupts
+generated by TLAN based adapters.
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+
+/***************************************************************
+ * tlan_handle_tx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Tx EOF interrupts which are raised
+ * by the adapter when it has completed sending the
+ * contents of a buffer. It determines which list/buffer
+ * was completed and resets it. If the buffer was the last
+ * in the channel (EOC), then the function checks to see if
+ * another buffer is ready to send, and if so, sends a Tx
+ * Go command. Finally, the driver activates/continues the
+ * activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eof(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int eoc = 0;
+ struct tlan_list *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 0;
+ u16 tmp_c_stat;
+
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: Handling TX EOF (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ struct sk_buff *skb = tlan_get_skb(head_list);
+
+ ack++;
+ dma_unmap_single(&priv->pci_dev->dev,
+ head_list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ head_list->buffer[8].address = 0;
+ head_list->buffer[9].address = 0;
+
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ dev->stats.tx_bytes += head_list->frame_size;
+
+ head_list->c_stat = TLAN_CSTAT_UNUSED;
+ netif_start_queue(dev);
+ CIRC_INC(priv->tx_head, TLAN_NUM_TX_LISTS);
+ head_list = priv->tx_list + priv->tx_head;
+ }
+
+ if (!ack)
+ netdev_info(dev,
+ "Received interrupt for uncompleted TX frame\n");
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d)\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->tx_in_progress = 0;
+ }
+ }
+
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_stat_overflow
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Statistics Overflow interrupt
+ * which means that one or more of the TLAN statistics
+ * registers has reached 1/2 capacity and needs to be read.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_stat_overflow(struct net_device *dev, u16 host_int)
+{
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+
+ return 1;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eof
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Rx EOF interrupt which
+ * indicates a frame has been received by the adapter from
+ * the net and the frame has been transferred to memory.
+ * The function passes the sk_buff holding the completed
+ * frame up the protocol stack, maps a fresh sk_buff into
+ * the list, then resets the used list and appends it to
+ * the end of the Rx chain. If the frame was the last in the Rx
+ * channel (EOC), the function restarts the receive channel
+ * by sending an Rx Go command to the adapter. Then it
+ * activates/continues the activity LED.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eof(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 ack = 0;
+ int eoc = 0;
+ struct tlan_list *head_list;
+ struct sk_buff *skb;
+ struct tlan_list *tail_list;
+ u16 tmp_c_stat;
+ dma_addr_t head_list_phys;
+
+ TLAN_DBG(TLAN_DEBUG_RX, "RECEIVE: handling RX EOF (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys =
+ priv->rx_list_dma + sizeof(struct tlan_list)*priv->rx_head;
+
+ while (((tmp_c_stat = head_list->c_stat) & TLAN_CSTAT_FRM_CMP)
+ && (ack < 255)) {
+ dma_addr_t frame_dma = head_list->buffer[0].address;
+ u32 frame_size = head_list->frame_size;
+ struct sk_buff *new_skb;
+
+ ack++;
+ if (tmp_c_stat & TLAN_CSTAT_EOC)
+ eoc = 1;
+
+ new_skb = netdev_alloc_skb_ip_align(dev,
+ TLAN_MAX_FRAME_SIZE + 5);
+ if (!new_skb)
+ goto drop_and_reuse;
+
+ skb = tlan_get_skb(head_list);
+ dma_unmap_single(&priv->pci_dev->dev, frame_dma,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ skb_put(skb, frame_size);
+
+ dev->stats.rx_bytes += frame_size;
+
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+
+ head_list->buffer[0].address =
+ dma_map_single(&priv->pci_dev->dev, new_skb->data,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+
+ tlan_store_skb(head_list, new_skb);
+drop_and_reuse:
+ head_list->forward = 0;
+ head_list->c_stat = 0;
+ tail_list = priv->rx_list + priv->rx_tail;
+ tail_list->forward = head_list_phys;
+
+ CIRC_INC(priv->rx_head, TLAN_NUM_RX_LISTS);
+ CIRC_INC(priv->rx_tail, TLAN_NUM_RX_LISTS);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ }
+
+ if (!ack)
+ netdev_info(dev,
+ "Received interrupt for uncompleted RX frame\n");
+
+
+ if (eoc) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: handling RX EOC (Head=%d Tail=%d)\n",
+ priv->rx_head, priv->rx_tail);
+ head_list = priv->rx_list + priv->rx_head;
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rx_eoc_count++;
+ }
+
+ if (priv->adapter->flags & TLAN_ADAPTER_ACTIVITY_LED) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK | TLAN_LED_ACT);
+ if (priv->timer.function == NULL) {
+ priv->timer.function = tlan_timer;
+ priv->timer.expires = jiffies + TLAN_TIMER_ACT_DELAY;
+ priv->timer_set_at = jiffies;
+ priv->timer_type = TLAN_TIMER_ACTIVITY;
+ add_timer(&priv->timer);
+ } else if (priv->timer_type == TLAN_TIMER_ACTIVITY) {
+ priv->timer_set_at = jiffies;
+ }
+ }
+
+ return ack;
+
+}
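+
+/* Note (editorial, not part of the driver): the Rx handler above recycles
+ * each completed list in place.  A serviced descriptor gets a freshly
+ * mapped sk_buff (or keeps its old one if allocation fails), is linked
+ * behind the current tail through tail_list->forward, and then both
+ * rx_head and rx_tail advance, so the Rx ring keeps its full complement
+ * of buffers.
+ */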
+
+
+
+
+/***************************************************************
+ * tlan_handle_dummy
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles the Dummy interrupt, which is
+ * raised whenever a test interrupt is generated by setting
+ * the Req_Int bit of HOST_CMD to 1.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_dummy(struct net_device *dev, u16 host_int)
+{
+ netdev_info(dev, "Test interrupt\n");
+ return 1;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_tx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Tx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * functionality, so process EOC events if this is the
+ * case.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_tx_eoc(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ struct tlan_list *head_list;
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_TX,
+ "TRANSMIT: handling TX EOC (Head=%d Tail=%d) -- IRQ\n",
+ priv->tx_head, priv->tx_tail);
+ head_list = priv->tx_list + priv->tx_head;
+ head_list_phys = priv->tx_list_dma
+ + sizeof(struct tlan_list)*priv->tx_head;
+ if ((head_list->c_stat & TLAN_CSTAT_READY)
+ == TLAN_CSTAT_READY) {
+ netif_stop_queue(dev);
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO;
+ } else {
+ priv->tx_in_progress = 0;
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_status_check
+ *
+ * Returns:
+ * 0 if Adapter check, 1 if Network Status check.
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This function handles Adapter Check/Network Status
+ * interrupts generated by the adapter. It checks the
+ * vector in the HOST_INT register to determine if it is
+ * an Adapter Check interrupt. If so, it resets the
+ * adapter. Otherwise it clears the status registers
+ * and services the PHY.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_status_check(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u32 ack;
+ u32 error;
+ u8 net_sts;
+ u32 phy;
+ u16 tlphy_ctl;
+ u16 tlphy_sts;
+
+ ack = 1;
+ if (host_int & TLAN_HI_IV_MASK) {
+ netif_stop_queue(dev);
+ error = inl(dev->base_addr + TLAN_CH_PARM);
+ netdev_info(dev, "Adaptor Error = 0x%x\n", error);
+ tlan_read_and_clear_stats(dev, TLAN_RECORD);
+ outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
+
+ schedule_work(&priv->tlan_tqueue);
+
+ netif_wake_queue(dev);
+ ack = 0;
+ } else {
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Status Check\n", dev->name);
+ phy = priv->phy[priv->phy_num];
+
+ net_sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ if (net_sts) {
+ tlan_dio_write8(dev->base_addr, TLAN_NET_STS, net_sts);
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Net_Sts = %x\n",
+ dev->name, (unsigned) net_sts);
+ }
+ if ((net_sts & TLAN_NET_STS_MIRQ) && (priv->phy_num == 0)) {
+ __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_STS, &tlphy_sts);
+ __tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ if (!(tlphy_sts & TLAN_TS_POLOK) &&
+ !(tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl |= TLAN_TC_SWAPOL;
+ __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ } else if ((tlphy_sts & TLAN_TS_POLOK) &&
+ (tlphy_ctl & TLAN_TC_SWAPOL)) {
+ tlphy_ctl &= ~TLAN_TC_SWAPOL;
+ __tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL,
+ tlphy_ctl);
+ }
+
+ if (debug)
+ __tlan_phy_print(dev);
+ }
+ }
+
+ return ack;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_handle_rx_eoc
+ *
+ * Returns:
+ * 1
+ * Parms:
+ * dev Device assigned the IRQ that was
+ * raised.
+ * host_int The contents of the HOST_INT
+ * port.
+ *
+ * This driver is structured to determine EOC occurrences by
+ * reading the CSTAT member of the list structure. Rx EOC
+ * interrupts are disabled via the DIO INTDIS register.
+ * However, TLAN chips before revision 3.0 didn't have this
+ * CSTAT member or an INTDIS register, so if this chip is
+ * pre-3.0, process EOC interrupts normally.
+ *
+ **************************************************************/
+
+static u32 tlan_handle_rx_eoc(struct net_device *dev, u16 host_int)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ dma_addr_t head_list_phys;
+ u32 ack = 1;
+
+ if (priv->tlan_rev < 0x30) {
+ TLAN_DBG(TLAN_DEBUG_RX,
+ "RECEIVE: Handling RX EOC (head=%d tail=%d) -- IRQ\n",
+ priv->rx_head, priv->rx_tail);
+ head_list_phys = priv->rx_list_dma
+ + sizeof(struct tlan_list)*priv->rx_head;
+ outl(head_list_phys, dev->base_addr + TLAN_CH_PARM);
+ ack |= TLAN_HC_GO | TLAN_HC_RT;
+ priv->rx_eoc_count++;
+ }
+
+ return ack;
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver timer function
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_timer
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * data A value given to add timer when
+ * add_timer was called.
+ *
+ * This function handles timed functionality for the
+ * TLAN driver. The two current timer uses are for
+ * delaying for autonegotiation and driving the ACT LED.
+ * - Autonegotiation requires being allowed about
+ * 2 1/2 seconds before attempting to transmit a
+ * packet. It would be a very bad thing to hang
+ * the kernel this long, so the driver doesn't
+ * allow transmission 'til after this time, for
+ * certain PHYs. It would be much nicer if all
+ * PHYs were interrupt-capable like the internal
+ * PHY.
+ * - The ACT LED, which shows adapter activity, is
+ * driven by the driver, and so must be left on
+ * for a short period to power up the LED so it
+ * can be seen. This delay can be changed by
+ * changing the TLAN_TIMER_ACT_DELAY in tlan.h,
+ * if desired. 100 ms produces a slightly
+ * sluggish response.
+ *
+ **************************************************************/
+
+static void tlan_timer(struct timer_list *t)
+{
+ struct tlan_priv *priv = from_timer(priv, t, timer);
+ struct net_device *dev = priv->dev;
+ u32 elapsed;
+ unsigned long flags = 0;
+
+ priv->timer.function = NULL;
+
+ switch (priv->timer_type) {
+ case TLAN_TIMER_PHY_PDOWN:
+ tlan_phy_power_down(dev);
+ break;
+ case TLAN_TIMER_PHY_PUP:
+ tlan_phy_power_up(dev);
+ break;
+ case TLAN_TIMER_PHY_RESET:
+ tlan_phy_reset(dev);
+ break;
+ case TLAN_TIMER_PHY_START_LINK:
+ tlan_phy_start_link(dev);
+ break;
+ case TLAN_TIMER_PHY_FINISH_AN:
+ tlan_phy_finish_auto_neg(dev);
+ break;
+ case TLAN_TIMER_FINISH_RESET:
+ tlan_finish_reset(dev);
+ break;
+ case TLAN_TIMER_ACTIVITY:
+ spin_lock_irqsave(&priv->lock, flags);
+ if (priv->timer.function == NULL) {
+ elapsed = jiffies - priv->timer_set_at;
+ if (elapsed >= TLAN_TIMER_ACT_DELAY) {
+ tlan_dio_write8(dev->base_addr,
+ TLAN_LED_REG, TLAN_LED_LINK);
+ } else {
+ priv->timer.expires = priv->timer_set_at
+ + TLAN_TIMER_ACT_DELAY;
+ spin_unlock_irqrestore(&priv->lock, flags);
+ add_timer(&priv->timer);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&priv->lock, flags);
+ break;
+ default:
+ break;
+ }
+
+}
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver adapter related routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_reset_lists
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure with the list
+ * structures to be reset.
+ *
+ * This routine sets the variables associated with managing
+ * the TLAN lists to their initial values.
+ *
+ **************************************************************/
+
+static void tlan_reset_lists(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ struct tlan_list *list;
+ dma_addr_t list_phys;
+ struct sk_buff *skb;
+
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ list->c_stat = TLAN_CSTAT_UNUSED;
+ list->buffer[0].address = 0;
+ list->buffer[2].count = 0;
+ list->buffer[2].address = 0;
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+
+ priv->rx_head = 0;
+ priv->rx_tail = TLAN_NUM_RX_LISTS - 1;
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ list_phys = priv->rx_list_dma + sizeof(struct tlan_list)*i;
+ list->c_stat = TLAN_CSTAT_READY;
+ list->frame_size = TLAN_MAX_FRAME_SIZE;
+ list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
+ skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
+ if (!skb)
+ break;
+
+ list->buffer[0].address = dma_map_single(&priv->pci_dev->dev,
+ skb->data,
+ TLAN_MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+ tlan_store_skb(list, skb);
+ list->buffer[1].count = 0;
+ list->buffer[1].address = 0;
+ list->forward = list_phys + sizeof(struct tlan_list);
+ }
+
+ /* in case ran out of memory early, clear bits */
+ while (i < TLAN_NUM_RX_LISTS) {
+ tlan_store_skb(priv->rx_list + i, NULL);
+ ++i;
+ }
+ list->forward = 0;
+
+}
+
+
+static void tlan_free_lists(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ struct tlan_list *list;
+ struct sk_buff *skb;
+
+ for (i = 0; i < TLAN_NUM_TX_LISTS; i++) {
+ list = priv->tx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ dma_unmap_single(&priv->pci_dev->dev,
+ list->buffer[0].address,
+ max(skb->len, (unsigned int)TLAN_MIN_FRAME_SIZE),
+ DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+
+ for (i = 0; i < TLAN_NUM_RX_LISTS; i++) {
+ list = priv->rx_list + i;
+ skb = tlan_get_skb(list);
+ if (skb) {
+ dma_unmap_single(&priv->pci_dev->dev,
+ list->buffer[0].address,
+ TLAN_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ list->buffer[8].address = 0;
+ list->buffer[9].address = 0;
+ }
+ }
+}
+
+
+
+
+/***************************************************************
+ * tlan_print_dio
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base Base IO port of the device of
+ * which to print DIO registers.
+ *
+ * This function prints out all the internal (DIO)
+ * registers of a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_print_dio(u16 io_base)
+{
+ u32 data0, data1;
+ int i;
+
+ pr_info("Contents of internal registers for io base 0x%04hx\n",
+ io_base);
+ pr_info("Off. +0 +4\n");
+ for (i = 0; i < 0x4C; i += 8) {
+ data0 = tlan_dio_read32(io_base, i);
+ data1 = tlan_dio_read32(io_base, i + 0x4);
+ pr_info("0x%02x 0x%08x 0x%08x\n", i, data0, data1);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_print_list
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * list A pointer to the struct tlan_list structure to
+ * be printed.
+ * type A string to designate type of list,
+ * "Rx" or "Tx".
+ * num The index of the list.
+ *
+ * This function prints out the contents of the list
+ * pointed to by the list parameter.
+ *
+ **************************************************************/
+
+static void tlan_print_list(struct tlan_list *list, char *type, int num)
+{
+ int i;
+
+ pr_info("%s List %d at %p\n", type, num, list);
+ pr_info(" Forward = 0x%08x\n", list->forward);
+ pr_info(" CSTAT = 0x%04hx\n", list->c_stat);
+ pr_info(" Frame Size = 0x%04hx\n", list->frame_size);
+ /* for (i = 0; i < 10; i++) { */
+ for (i = 0; i < 2; i++) {
+ pr_info(" Buffer[%d].count, addr = 0x%08x, 0x%08x\n",
+ i, list->buffer[i].count, list->buffer[i].address);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_read_and_clear_stats
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * whose stats are to be read.
+ * record Flag indicating whether to record or discard the values.
+ *
+ * This function reads all the internal status registers
+ * of the TLAN chip, which clears them as a side effect.
+ * It then either adds the values to the device's status
+ * struct, or discards them, depending on whether record
+ * is TLAN_RECORD (!=0) or TLAN_IGNORE (==0).
+ *
+ **************************************************************/
+
+static void tlan_read_and_clear_stats(struct net_device *dev, int record)
+{
+ u32 tx_good, tx_under;
+ u32 rx_good, rx_over;
+ u32 def_tx, crc, code;
+ u32 multi_col, single_col;
+ u32 excess_col, late_col, loss;
+
+ outw(TLAN_GOOD_TX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ tx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ tx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ tx_under = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_GOOD_RX_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ rx_good = inb(dev->base_addr + TLAN_DIO_DATA);
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ rx_good += inb(dev->base_addr + TLAN_DIO_DATA + 2) << 16;
+ rx_over = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_DEFERRED_TX, dev->base_addr + TLAN_DIO_ADR);
+ def_tx = inb(dev->base_addr + TLAN_DIO_DATA);
+ def_tx += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ crc = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ code = inb(dev->base_addr + TLAN_DIO_DATA + 3);
+
+ outw(TLAN_MULTICOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ multi_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ multi_col += inb(dev->base_addr + TLAN_DIO_DATA + 1) << 8;
+ single_col = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+ single_col += inb(dev->base_addr + TLAN_DIO_DATA + 3) << 8;
+
+ outw(TLAN_EXCESSCOL_FRMS, dev->base_addr + TLAN_DIO_ADR);
+ excess_col = inb(dev->base_addr + TLAN_DIO_DATA);
+ late_col = inb(dev->base_addr + TLAN_DIO_DATA + 1);
+ loss = inb(dev->base_addr + TLAN_DIO_DATA + 2);
+
+ if (record) {
+ dev->stats.rx_packets += rx_good;
+ dev->stats.rx_errors += rx_over + crc + code;
+ dev->stats.tx_packets += tx_good;
+ dev->stats.tx_errors += tx_under + loss;
+ dev->stats.collisions += multi_col
+ + single_col + excess_col + late_col;
+
+ dev->stats.rx_over_errors += rx_over;
+ dev->stats.rx_crc_errors += crc;
+ dev->stats.rx_frame_errors += code;
+
+ dev->stats.tx_aborted_errors += tx_under;
+ dev->stats.tx_carrier_errors += loss;
+ }
+
+}
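+
+/* Illustrative sketch (editorial, not part of the driver): each statistics
+ * location read above packs several counters into one 32-bit DIO word,
+ * e.g. the good-Tx-frames register carries a 24-bit frame count in its low
+ * three bytes and the Tx underrun count in the top byte, which is why the
+ * code reads it byte by byte.  Read as one 32-bit value, the split would
+ * look like the hypothetical helper below.
+ */
+#if 0	/* example only, never compiled */
+static void tlan_example_split_tx_stats(struct net_device *dev)
+{
+	u32 reg = tlan_dio_read32(dev->base_addr, TLAN_GOOD_TX_FRMS);
+	u32 tx_good = reg & 0x00ffffff;	/* 24-bit good-frame counter */
+	u32 tx_under = reg >> 24;	/* underruns in the top byte */
+}
+#endif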
+
+
+
+
+/***************************************************************
+ * tlan_reset_adapter
+ *
+ * Returns:
+ * 0
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * to be reset.
+ *
+ * This function resets the adapter and its physical
+ * device. See Chap. 3, pp. 9-10 of the "ThunderLAN
+ * Programmer's Guide" for details. The routine tries to
+ * implement what is detailed there, though adjustments
+ * have been made.
+ *
+ **************************************************************/
+
+static void
+tlan_reset_adapter(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ int i;
+ u32 addr;
+ u32 data;
+ u8 data8;
+
+ priv->tlan_full_duplex = false;
+ priv->phy_online = 0;
+ netif_carrier_off(dev);
+
+/* 1. Assert reset bit. */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_AD_RST;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+ udelay(1000);
+
+/* 2. Turn off interrupts. (Probably isn't necessary) */
+
+ data = inl(dev->base_addr + TLAN_HOST_CMD);
+ data |= TLAN_HC_INT_OFF;
+ outl(data, dev->base_addr + TLAN_HOST_CMD);
+
+/* 3. Clear AREGs and HASHs. */
+
+ for (i = TLAN_AREG_0; i <= TLAN_HASH_2; i += 4)
+ tlan_dio_write32(dev->base_addr, (u16) i, 0);
+
+/* 4. Setup NetConfig register. */
+
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+/* 5. Load Ld_Tmr and Ld_Thr in HOST_CMD. */
+
+ outl(TLAN_HC_LD_TMR | 0x3f, dev->base_addr + TLAN_HOST_CMD);
+ outl(TLAN_HC_LD_THR | 0x9, dev->base_addr + TLAN_HOST_CMD);
+
+/* 6. Unreset the MII by setting NMRST (in NetSio) to 1. */
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ addr = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+ tlan_set_bit(TLAN_NET_SIO_NMRST, addr);
+
+/* 7. Setup the remaining registers. */
+
+ if (priv->tlan_rev >= 0x30) {
+ data8 = TLAN_ID_TX_EOC | TLAN_ID_RX_EOC;
+ tlan_dio_write8(dev->base_addr, TLAN_INT_DIS, data8);
+ }
+ tlan_phy_detect(dev);
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN;
+
+ if (priv->adapter->flags & TLAN_ADAPTER_BIT_RATE_PHY) {
+ data |= TLAN_NET_CFG_BIT;
+ if (priv->aui == 1) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x0a);
+ } else if (priv->duplex == TLAN_DUPLEX_FULL) {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x00);
+ priv->tlan_full_duplex = true;
+ } else {
+ tlan_dio_write8(dev->base_addr, TLAN_ACOMMIT, 0x08);
+ }
+ }
+
+ /* don't power down internal PHY if we're going to use it */
+ if (priv->phy_num == 0 ||
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
+ data |= TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY)
+ tlan_finish_reset(dev);
+ else
+ tlan_phy_power_down(dev);
+
+}
+
+
+
+
+static void
+tlan_finish_reset(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u8 data;
+ u32 phy;
+ u8 sio;
+ u16 status;
+ u16 partner;
+ u16 tlphy_ctl;
+ u16 tlphy_par;
+ u16 tlphy_id1, tlphy_id2;
+ int i;
+
+ phy = priv->phy[priv->phy_num];
+
+ data = TLAN_NET_CMD_NRESET | TLAN_NET_CMD_NWRAP;
+ if (priv->tlan_full_duplex)
+ data |= TLAN_NET_CMD_DUPLEX;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_CMD, data);
+ data = TLAN_NET_MASK_MASK4 | TLAN_NET_MASK_MASK5;
+ if (priv->phy_num == 0)
+ data |= TLAN_NET_MASK_MASK7;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_MASK, data);
+ tlan_dio_write16(dev->base_addr, TLAN_MAX_RX, ((1536)+7)&~7);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &tlphy_id1);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &tlphy_id2);
+
+ if ((priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) ||
+ (priv->aui)) {
+ status = MII_GS_LINK;
+ netdev_info(dev, "Link forced\n");
+ } else {
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ if (status & MII_GS_LINK) {
+ /* We only support link info on Nat.Sem. PHY's */
+ if ((tlphy_id1 == NAT_SEM_ID1) &&
+ (tlphy_id2 == NAT_SEM_ID2)) {
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA,
+ &partner);
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
+ &tlphy_par);
+
+ netdev_info(dev,
+ "Link active, %s %uMbps %s-Duplex\n",
+ !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+ ? "forced" : "Autonegotiation enabled,",
+ tlphy_par & TLAN_PHY_SPEED_100
+ ? 100 : 10,
+ tlphy_par & TLAN_PHY_DUPLEX_FULL
+ ? "Full" : "Half");
+
+ if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+ netdev_info(dev, "Partner capability:");
+ for (i = 5; i < 10; i++)
+ if (partner & (1 << i))
+ pr_cont(" %s",
+ media[i-5]);
+ pr_cont("\n");
+ }
+ } else
+ netdev_info(dev, "Link active\n");
+ /* Enabling link beat monitoring */
+ priv->media_timer.expires = jiffies + HZ;
+ add_timer(&priv->media_timer);
+ }
+ }
+
+ if (priv->phy_num == 0) {
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tlphy_ctl);
+ tlphy_ctl |= TLAN_TC_INTEN;
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tlphy_ctl);
+ sio = tlan_dio_read8(dev->base_addr, TLAN_NET_SIO);
+ sio |= TLAN_NET_SIO_MINTEN;
+ tlan_dio_write8(dev->base_addr, TLAN_NET_SIO, sio);
+ }
+
+ if (status & MII_GS_LINK) {
+ tlan_set_mac(dev, 0, dev->dev_addr);
+ priv->phy_online = 1;
+ outb((TLAN_HC_INT_ON >> 8), dev->base_addr + TLAN_HOST_CMD + 1);
+ if (debug >= 1 && debug != TLAN_DEBUG_PROBE)
+ outb((TLAN_HC_REQ_INT >> 8),
+ dev->base_addr + TLAN_HOST_CMD + 1);
+ outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
+ outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+ netif_carrier_on(dev);
+ } else {
+ netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
+ tlan_set_timer(dev, (10*HZ), TLAN_TIMER_FINISH_RESET);
+ return;
+ }
+ tlan_set_multicast_list(dev);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_set_mac
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev Pointer to device structure of adapter
+ * on which to change the AREG.
+ * areg The AREG to set the address in (0 - 3).
+ * mac A pointer to an array of chars. Each
+ * element stores one byte of the address.
+ * i.e., it isn't in ASCII.
+ *
+ * This function transfers a MAC address to one of the
+ * TLAN AREGs (address registers). The TLAN chip locks
+ * the register on writing to offset 0 and unlocks the
+ * register after writing to offset 5. If NULL is passed
+ * in mac, then the AREG is filled with 0's.
+ *
+ **************************************************************/
+
+static void tlan_set_mac(struct net_device *dev, int areg, const char *mac)
+{
+ int i;
+
+ areg *= 6;
+
+ if (mac != NULL) {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, mac[i]);
+ } else {
+ for (i = 0; i < 6; i++)
+ tlan_dio_write8(dev->base_addr,
+ TLAN_AREG_0 + areg + i, 0);
+ }
+
+}
+
+
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver PHY layer routines
+
+******************************************************************************
+*****************************************************************************/
+
+
+
+/*********************************************************************
+ * __tlan_phy_print
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the
+ * TLAN device having the PHYs to be detailed.
+ *
+ * This function prints the registers of a PHY (aka transceiver).
+ *
+ ********************************************************************/
+
+static void __tlan_phy_print(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 i, data0, data1, data2, data3, phy;
+
+ lockdep_assert_held(&priv->lock);
+
+ phy = priv->phy[priv->phy_num];
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ netdev_info(dev, "Unmanaged PHY\n");
+ } else if (phy <= TLAN_PHY_MAX_ADDR) {
+ netdev_info(dev, "PHY 0x%02x\n", phy);
+ pr_info(" Off. +0 +1 +2 +3\n");
+ for (i = 0; i < 0x20; i += 4) {
+ __tlan_mii_read_reg(dev, phy, i, &data0);
+ __tlan_mii_read_reg(dev, phy, i + 1, &data1);
+ __tlan_mii_read_reg(dev, phy, i + 2, &data2);
+ __tlan_mii_read_reg(dev, phy, i + 3, &data3);
+ pr_info(" 0x%02x 0x%04hx 0x%04hx 0x%04hx 0x%04hx\n",
+ i, data0, data1, data2, data3);
+ }
+ } else {
+ netdev_info(dev, "Invalid PHY\n");
+ }
+
+}
+
+static void tlan_phy_print(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_phy_print(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+
+/*********************************************************************
+ * tlan_phy_detect
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev A pointer to the device structure of the adapter
+ * for which the PHY needs to be determined.
+ *
+ * So far I've found that adapters which have external PHYs
+ * may also use the internal PHY for part of the functionality.
+ * (eg, AUI/Thinnet). This function finds out if this TLAN
+ * (e.g., AUI/Thinnet). This function finds out if this TLAN
+ * chip has an internal PHY, and then finds the first external
+ * PHY (starting from address 0) if one exists.
+ ********************************************************************/
+
+static void tlan_phy_detect(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 control;
+ u16 hi;
+ u16 lo;
+ u32 phy;
+
+ if (priv->adapter->flags & TLAN_ADAPTER_UNMANAGED_PHY) {
+ priv->phy_num = 0xffff;
+ return;
+ }
+
+ tlan_mii_read_reg(dev, TLAN_PHY_MAX_ADDR, MII_GEN_ID_HI, &hi);
+
+ if (hi != 0xffff)
+ priv->phy[0] = TLAN_PHY_MAX_ADDR;
+ else
+ priv->phy[0] = TLAN_PHY_NONE;
+
+ priv->phy[1] = TLAN_PHY_NONE;
+ for (phy = 0; phy <= TLAN_PHY_MAX_ADDR; phy++) {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &control);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_HI, &hi);
+ tlan_mii_read_reg(dev, phy, MII_GEN_ID_LO, &lo);
+ if ((control != 0xffff) ||
+ (hi != 0xffff) || (lo != 0xffff)) {
+ TLAN_DBG(TLAN_DEBUG_GNRL,
+ "PHY found at %02x %04x %04x %04x\n",
+ phy, control, hi, lo);
+ if ((priv->phy[1] == TLAN_PHY_NONE) &&
+ (phy != TLAN_PHY_MAX_ADDR)) {
+ priv->phy[1] = phy;
+ }
+ }
+ }
+
+ if (priv->phy[1] != TLAN_PHY_NONE)
+ priv->phy_num = 1;
+ else if (priv->phy[0] != TLAN_PHY_NONE)
+ priv->phy_num = 0;
+ else
+ netdev_info(dev, "Cannot initialize device, no PHY was found!\n");
+
+}
+
+
+
+
+static void tlan_phy_power_down(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering down PHY(s).\n", dev->name);
+ value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
+ /* if using internal PHY, the external PHY must be powered on */
+ if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
+ value = MII_GC_ISOLATE; /* just isolate it from MII */
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
+ }
+
+	/* Wait for 50 ms and power up.
+ * This is arbitrary. It is intended to make sure the
+ * transceiver settles.
+ */
+ tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_PUP);
+
+}
+
+
+
+
+static void tlan_phy_power_up(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 value;
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Powering up PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
+ value = MII_GC_LOOPBK;
+ tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
+ tlan_mii_sync(dev->base_addr);
+ /* Wait for 500 ms and reset the
+ * transceiver. The TLAN docs say both 50 ms and
+ * 500 ms, so do the longer, just in case.
+ */
+ tlan_set_timer(dev, msecs_to_jiffies(500), TLAN_TIMER_PHY_RESET);
+
+}
+
+
+
+
+static void tlan_phy_reset(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 phy;
+ u16 value;
+ unsigned long timeout = jiffies + HZ;
+
+ phy = priv->phy[priv->phy_num];
+
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Resetting PHY.\n", dev->name);
+ tlan_mii_sync(dev->base_addr);
+ value = MII_GC_LOOPBK | MII_GC_RESET;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
+ do {
+ tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+ if (time_after(jiffies, timeout)) {
+ netdev_err(dev, "PHY reset timeout\n");
+ return;
+ }
+ } while (value & MII_GC_RESET);
+
+	/* Wait for 50 ms and initialize.
+	 * (This delay was originally 500 ms; I don't remember why,
+	 * and 50 ms seems long enough.)
+ */
+ tlan_set_timer(dev, msecs_to_jiffies(50), TLAN_TIMER_PHY_START_LINK);
+
+}
+
+
+
+
+static void tlan_phy_start_link(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 ability;
+ u16 control;
+ u16 data;
+ u16 phy;
+ u16 status;
+ u16 tctl;
+
+ phy = priv->phy[priv->phy_num];
+ TLAN_DBG(TLAN_DEBUG_GNRL, "%s: Trying to activate link.\n", dev->name);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &ability);
+
+ if ((status & MII_GS_AUTONEG) &&
+ (!priv->aui)) {
+ ability = status >> 11;
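+
+		/* The raw MII_GEN_CTL values written below correspond to the
+		 * standard BMCR bits: 0x2000 selects 100 Mbit/s, 0x0100
+		 * selects full duplex, 0x1000 enables autonegotiation and
+		 * 0x1200 additionally restarts it (0x0200 = restart).
+		 */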
+ if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0000);
+ } else if (priv->speed == TLAN_SPEED_10 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x0100);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_HALF) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2000);
+ } else if (priv->speed == TLAN_SPEED_100 &&
+ priv->duplex == TLAN_DUPLEX_FULL) {
+ priv->tlan_full_duplex = true;
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x2100);
+ } else {
+
+ /* Set Auto-Neg advertisement */
+ tlan_mii_write_reg(dev, phy, MII_AN_ADV,
+ (ability << 5) | 1);
+			/* Enable Auto-Neg */
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1000);
+ /* Restart Auto-Neg */
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, 0x1200);
+			/* Wait 2 sec for autonegotiation
+			 * to complete. The max spec time is less than this,
+			 * but the card needs additional time to start AN;
+			 * .5 sec should be plenty extra.
+ */
+ netdev_info(dev, "Starting autonegotiation\n");
+ tlan_set_timer(dev, (2*HZ), TLAN_TIMER_PHY_FINISH_AN);
+ return;
+ }
+
+ }
+
+ if ((priv->aui) && (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
+ | TLAN_NET_CFG_PHY_EN;
+ tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
+ tlan_set_timer(dev, msecs_to_jiffies(40), TLAN_TIMER_PHY_PDOWN);
+ return;
+ } else if (priv->phy_num == 0) {
+ control = 0;
+ tlan_mii_read_reg(dev, phy, TLAN_TLPHY_CTL, &tctl);
+ if (priv->aui) {
+ tctl |= TLAN_TC_AUISEL;
+ } else {
+ tctl &= ~TLAN_TC_AUISEL;
+ if (priv->duplex == TLAN_DUPLEX_FULL) {
+ control |= MII_GC_DUPLEX;
+ priv->tlan_full_duplex = true;
+ }
+ if (priv->speed == TLAN_SPEED_100)
+ control |= MII_GC_SPEEDSEL;
+ }
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL, control);
+ tlan_mii_write_reg(dev, phy, TLAN_TLPHY_CTL, tctl);
+ }
+
+	/* Wait 4 sec to give the transceiver time
+	 * to establish a link.
+ */
+ tlan_set_timer(dev, (4*HZ), TLAN_TIMER_FINISH_RESET);
+
+}
+
+
+
+
+static void tlan_phy_finish_auto_neg(struct net_device *dev)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ u16 an_adv;
+ u16 an_lpa;
+ u16 mode;
+ u16 phy;
+ u16 status;
+
+ phy = priv->phy[priv->phy_num];
+
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+ udelay(1000);
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
+
+ if (!(status & MII_GS_AUTOCMPLT)) {
+		/* Give autonegotiation another 2 sec.
+		 * Perhaps we should fail after a while.
+ */
+ tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
+ return;
+ }
+
+ netdev_info(dev, "Autonegotiation complete\n");
+ tlan_mii_read_reg(dev, phy, MII_AN_ADV, &an_adv);
+ tlan_mii_read_reg(dev, phy, MII_AN_LPA, &an_lpa);
+ mode = an_adv & an_lpa & 0x03E0;
+ if (mode & 0x0100)
+ priv->tlan_full_duplex = true;
+ else if (!(mode & 0x0080) && (mode & 0x0040))
+ priv->tlan_full_duplex = true;
+
+ /* switch to internal PHY for 10 Mbps */
+ if ((!(mode & 0x0180)) &&
+ (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
+ (priv->phy_num != 0)) {
+ priv->phy_num = 0;
+ tlan_set_timer(dev, msecs_to_jiffies(400), TLAN_TIMER_PHY_PDOWN);
+ return;
+ }
+
+ if (priv->phy_num == 0) {
+ if ((priv->duplex == TLAN_DUPLEX_FULL) ||
+ (an_adv & an_lpa & 0x0040)) {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB | MII_GC_DUPLEX);
+ netdev_info(dev, "Starting internal PHY with FULL-DUPLEX\n");
+ } else {
+ tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ MII_GC_AUTOENB);
+ netdev_info(dev, "Starting internal PHY with HALF-DUPLEX\n");
+ }
+ }
+
+	/* Wait for 100 ms. No reason in particular.
+ */
+ tlan_set_timer(dev, msecs_to_jiffies(100), TLAN_TIMER_FINISH_RESET);
+
+}
+
+
+/*********************************************************************
+ *
+ * tlan_phy_monitor
+ *
+ * Returns:
+ * None
+ *
+ * Params:
+ * data The device structure of this device.
+ *
+ *
+ * This function monitors PHY condition by reading the status
+ * register via the MII bus, controls LINK LED and notifies the
+ * kernel about link state.
+ *
+ *******************************************************************/
+
+static void tlan_phy_monitor(struct timer_list *t)
+{
+ struct tlan_priv *priv = from_timer(priv, t, media_timer);
+ struct net_device *dev = priv->dev;
+ u16 phy;
+ u16 phy_status;
+
+ phy = priv->phy[priv->phy_num];
+
+ /* Get PHY status register */
+ tlan_mii_read_reg(dev, phy, MII_GEN_STS, &phy_status);
+
+ /* Check if link has been lost */
+ if (!(phy_status & MII_GS_LINK)) {
+ if (netif_carrier_ok(dev)) {
+ printk(KERN_DEBUG "TLAN: %s has lost link\n",
+ dev->name);
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
+ netif_carrier_off(dev);
+ if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
+ /* power down internal PHY */
+ u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
+ MII_GC_ISOLATE;
+
+ tlan_mii_sync(dev->base_addr);
+ tlan_mii_write_reg(dev, priv->phy[0],
+ MII_GEN_CTL, data);
+ /* set to external PHY */
+ priv->phy_num = 1;
+ /* restart autonegotiation */
+ tlan_set_timer(dev, msecs_to_jiffies(400),
+ TLAN_TIMER_PHY_PDOWN);
+ return;
+ }
+ }
+ }
+
+	/* Link reestablished? */
+ if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
+ tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+ printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
+ dev->name);
+ netif_carrier_on(dev);
+ }
+ priv->media_timer.expires = jiffies + HZ;
+ add_timer(&priv->media_timer);
+}
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver MII routines
+
+These routines are based on the information in chap. 2 of the
+"ThunderLAN Programmer's Guide", pp. 15-24.
+
+******************************************************************************
+*****************************************************************************/
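+
+/* For reference (editorial, not part of the driver): the bit-banged
+ * routines below implement the standard IEEE 802.3 clause 22 MII
+ * management frame, emitted via tlan_mii_send_data() as
+ *
+ *	<preamble/sync> <start 01> <opcode: read 10 / write 01>
+ *	<5-bit PHY addr> <5-bit reg addr> <turnaround> <16 data bits>
+ *
+ * tlan_mii_sync() supplies the preamble by clocking the bus with MTXEN
+ * deasserted; the turnaround/ACK handling is visible in
+ * __tlan_mii_read_reg() and __tlan_mii_write_reg().
+ */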
+
+
+/***************************************************************
+ * __tlan_mii_read_reg
+ *
+ * Returns:
+ * false if ack received ok
+ * true if no ack received or other error
+ *
+ * Parms:
+ * dev The device structure containing
+ * the IO address and interrupt count
+ * for this device.
+ * phy The address of the PHY to be queried.
+ * reg The register whose contents are to be
+ * retrieved.
+ * val A pointer to a variable to store the
+ * retrieved value.
+ *
+ * This function uses the TLAN's MII bus to retrieve the contents
+ * of a given register on a PHY. It sends the appropriate info
+ * and then reads the 16-bit register value from the MII bus via
+ * the TLAN SIO register.
+ *
+ **************************************************************/
+
+static bool
+__tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg, u16 *val)
+{
+ u8 nack;
+ u16 sio, tmp;
+ u32 i;
+ bool err;
+ int minten;
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ lockdep_assert_held(&priv->lock);
+
+ err = false;
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_mii_sync(dev->base_addr);
+
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* read (10b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
+
+
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio); /* change direction */
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* clock idle bit */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* wait 300ns */
+
+ nack = tlan_get_bit(TLAN_NET_SIO_MDATA, sio); /* check for ACK */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio); /* finish ACK */
+ if (nack) { /* no ACK, so fake it */
+ for (i = 0; i < 16; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+ tmp = 0xffff;
+ err = true;
+ } else { /* ACK, so read data */
+ for (tmp = 0, i = 0x8000; i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_MDATA, sio))
+ tmp |= i;
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+ }
+
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ *val = tmp;
+
+ return err;
+}
+
+static void tlan_mii_read_reg(struct net_device *dev, u16 phy, u16 reg,
+ u16 *val)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_mii_read_reg(dev, phy, reg, val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
+
+/***************************************************************
+ * tlan_mii_send_data
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ * data The value to be placed on the MII bus.
+ * num_bits The number of bits in data that are to
+ * be placed on the MII bus.
+ *
+ * This function sends a sequence of bits on the MII
+ * configuration bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_send_data(u16 base_port, u32 data, unsigned num_bits)
+{
+ u16 sio;
+ u32 i;
+
+ if (num_bits == 0)
+ return;
+
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+ tlan_set_bit(TLAN_NET_SIO_MTXEN, sio);
+
+ for (i = (0x1 << (num_bits - 1)); i; i >>= 1) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ if (data & i)
+ tlan_set_bit(TLAN_NET_SIO_MDATA, sio);
+ else
+ tlan_clear_bit(TLAN_NET_SIO_MDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ (void) tlan_get_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ *	tlan_mii_sync
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * base_port The base IO port of the adapter in
+ * question.
+ *
+ * This function syncs all PHYs on the MII configuration
+ * bus.
+ *
+ **************************************************************/
+
+static void tlan_mii_sync(u16 base_port)
+{
+ int i;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, base_port + TLAN_DIO_ADR);
+ sio = base_port + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_clear_bit(TLAN_NET_SIO_MTXEN, sio);
+ for (i = 0; i < 32; i++) {
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio);
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * __tlan_mii_write_reg
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * dev The device structure for the device
+ * to write to.
+ * phy The address of the PHY to be written to.
+ * reg The register whose contents are to be
+ * written.
+ * val The value to be written to the register.
+ *
+ * This function uses the TLAN's MII bus to write the contents of a
+ * given register on a PHY. It sends the appropriate info and then
+ * writes the 16-bit register value onto the MII configuration bus
+ * via the TLAN SIO register.
+ *
+ **************************************************************/
+
+static void
+__tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+ u16 sio;
+ int minten;
+ struct tlan_priv *priv = netdev_priv(dev);
+
+ lockdep_assert_held(&priv->lock);
+
+ outw(TLAN_NET_SIO, dev->base_addr + TLAN_DIO_ADR);
+ sio = dev->base_addr + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_mii_sync(dev->base_addr);
+
+ minten = tlan_get_bit(TLAN_NET_SIO_MINTEN, sio);
+ if (minten)
+ tlan_clear_bit(TLAN_NET_SIO_MINTEN, sio);
+
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* start (01b) */
+ tlan_mii_send_data(dev->base_addr, 0x1, 2); /* write (01b) */
+ tlan_mii_send_data(dev->base_addr, phy, 5); /* device # */
+ tlan_mii_send_data(dev->base_addr, reg, 5); /* register # */
+
+ tlan_mii_send_data(dev->base_addr, 0x2, 2); /* send ACK */
+ tlan_mii_send_data(dev->base_addr, val, 16); /* send data */
+
+ tlan_clear_bit(TLAN_NET_SIO_MCLK, sio); /* idle cycle */
+ tlan_set_bit(TLAN_NET_SIO_MCLK, sio);
+
+ if (minten)
+ tlan_set_bit(TLAN_NET_SIO_MINTEN, sio);
+
+}
+
+static void
+tlan_mii_write_reg(struct net_device *dev, u16 phy, u16 reg, u16 val)
+{
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+ __tlan_mii_write_reg(dev, phy, reg, val);
+ spin_unlock_irqrestore(&priv->lock, flags);
+}
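+
+/* Example (illustrative sketch, not an actual call site in this driver):
+ * restarting autonegotiation through the locked tlan_mii_write_reg()
+ * wrapper.  MII_GEN_CTL, MII_GC_AUTOENB and MII_GC_AUTORSRT come from
+ * tlan.h; "dev" and "phy" are assumed to be known to the caller.
+ *
+ *	tlan_mii_write_reg(dev, phy, MII_GEN_CTL,
+ *			   MII_GC_AUTOENB | MII_GC_AUTORSRT);
+ */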
+
+
+/*****************************************************************************
+******************************************************************************
+
+ThunderLAN driver EEPROM routines
+
+The Compaq Netelligent 10 and 10/100 cards use a Microchip 24C02A
+EEPROM. These functions are based on information in Microchip's
+data sheet. I don't know how well these functions will work with
+other EEPROMs.
+
+******************************************************************************
+*****************************************************************************/
+
+
+/***************************************************************
+ * tlan_ee_send_start
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ *
+ * This function sends a start cycle to an EEPROM attached
+ * to a TLAN chip.
+ *
+ **************************************************************/
+
+static void tlan_ee_send_start(u16 io_base)
+{
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_send_byte
+ *
+ * Returns:
+ * If the correct ack was received, 0, otherwise 1
+ * Parms:
+ *	io_base		The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data The 8 bits of information to
+ * send to the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ *		byte is sent and the ack is
+ * read.
+ *
+ * This function sends a byte on the serial EEPROM line,
+ * driving the clock to send each bit. The function then
+ * reverses transmission direction and reads an acknowledge
+ * bit.
+ *
+ **************************************************************/
+
+static int tlan_ee_send_byte(u16 io_base, u8 data, int stop)
+{
+ int err;
+ u8 place;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+
+ /* Assume clock is low, tx is enabled; */
+ for (place = 0x80; place != 0; place >>= 1) {
+ if (place & data)
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ else
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ }
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ err = tlan_get_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+
+ if ((!err) && stop) {
+ /* STOP, raise data while clock is high */
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+ return err;
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_receive_byte
+ *
+ * Returns:
+ * Nothing
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * data An address to a char to hold the
+ * data sent from the EEPROM.
+ * stop If TLAN_EEPROM_STOP is passed, a
+ * stop cycle is sent after the
+ * byte is received, and no ack is
+ * sent.
+ *
+ * This function receives 8 bits of data from the EEPROM
+ * over the serial link. It then sends an ack bit, or no
+ * ack and a stop bit. This function is used to retrieve
+ * data after the address of a byte in the EEPROM has been
+ * sent.
+ *
+ **************************************************************/
+
+static void tlan_ee_receive_byte(u16 io_base, u8 *data, int stop)
+{
+ u8 place;
+ u16 sio;
+
+ outw(TLAN_NET_SIO, io_base + TLAN_DIO_ADR);
+ sio = io_base + TLAN_DIO_DATA + TLAN_NET_SIO;
+ *data = 0;
+
+ /* Assume clock is low, tx is enabled; */
+ tlan_clear_bit(TLAN_NET_SIO_ETXEN, sio);
+ for (place = 0x80; place; place >>= 1) {
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ if (tlan_get_bit(TLAN_NET_SIO_EDATA, sio))
+ *data |= place;
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ }
+
+ tlan_set_bit(TLAN_NET_SIO_ETXEN, sio);
+ if (!stop) {
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio); /* ack = 0 */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ } else {
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio); /* no ack = 1 (?) */
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_clear_bit(TLAN_NET_SIO_ECLOK, sio);
+ /* STOP, raise data while clock is high */
+ tlan_clear_bit(TLAN_NET_SIO_EDATA, sio);
+ tlan_set_bit(TLAN_NET_SIO_ECLOK, sio);
+ tlan_set_bit(TLAN_NET_SIO_EDATA, sio);
+ }
+
+}
+
+
+
+
+/***************************************************************
+ * tlan_ee_read_byte
+ *
+ * Returns:
+ *	0 if no error occurred; otherwise, the stage at
+ *	which the error occurred.
+ * Parms:
+ * io_base The IO port base address for the
+ * TLAN device with the EEPROM to
+ * use.
+ * ee_addr The address of the byte in the
+ * EEPROM whose contents are to be
+ * retrieved.
+ * data An address to a char to hold the
+ * data obtained from the EEPROM.
+ *
+ * This function reads a byte of information from a byte
+ * cell in the EEPROM.
+ *
+ **************************************************************/
+
+static int tlan_ee_read_byte(struct net_device *dev, u8 ee_addr, u8 *data)
+{
+ int err;
+ struct tlan_priv *priv = netdev_priv(dev);
+ unsigned long flags = 0;
+ int ret = 0;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa0, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 1;
+ goto fail;
+ }
+ err = tlan_ee_send_byte(dev->base_addr, ee_addr, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 2;
+ goto fail;
+ }
+ tlan_ee_send_start(dev->base_addr);
+ err = tlan_ee_send_byte(dev->base_addr, 0xa1, TLAN_EEPROM_ACK);
+ if (err) {
+ ret = 3;
+ goto fail;
+ }
+ tlan_ee_receive_byte(dev->base_addr, data, TLAN_EEPROM_STOP);
+fail:
+ spin_unlock_irqrestore(&priv->lock, flags);
+
+ return ret;
+
+}
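+
+/* Example (illustrative sketch): reading the station address out of the
+ * EEPROM one byte at a time with tlan_ee_read_byte().  The offset of the
+ * address in the EEPROM is board specific; "addr_ofs" here stands for a
+ * value taken from the adapter table entry for the board in question.
+ *
+ *	u8 mac[6];
+ *	int i, err = 0;
+ *
+ *	for (i = 0; i < 6; i++) {
+ *		err = tlan_ee_read_byte(dev, addr_ofs + i, &mac[i]);
+ *		if (err)
+ *			break;
+ *	}
+ */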
+
+
+
diff --git a/drivers/net/ethernet/ti/tlan.h b/drivers/net/ethernet/ti/tlan.h
new file mode 100644
index 000000000..e99284118
--- /dev/null
+++ b/drivers/net/ethernet/ti/tlan.h
@@ -0,0 +1,544 @@
+#ifndef TLAN_H
+#define TLAN_H
+/********************************************************************
+ *
+ * Linux ThunderLAN Driver
+ *
+ * tlan.h
+ * by James Banks
+ *
+ * (C) 1997-1998 Caldera, Inc.
+ * (C) 1999-2001 Torben Mathiasen
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU General Public License, incorporated herein by reference.
+ *
+ *
+ * Dec 10, 1999 Torben Mathiasen <torben.mathiasen@compaq.com>
+ * New Maintainer
+ *
+ ********************************************************************/
+
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/netdevice.h>
+
+
+
+ /*****************************************************************
+ * TLan Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_MIN_FRAME_SIZE 64
+#define TLAN_MAX_FRAME_SIZE 1600
+
+#define TLAN_NUM_RX_LISTS 32
+#define TLAN_NUM_TX_LISTS 64
+
+#define TLAN_IGNORE 0
+#define TLAN_RECORD 1
+
+#define TLAN_DBG(lvl, format, args...) \
+ do { \
+ if (debug&lvl) \
+ printk(KERN_DEBUG "TLAN: " format, ##args); \
+ } while (0)
+
+#define TLAN_DEBUG_GNRL 0x0001
+#define TLAN_DEBUG_TX 0x0002
+#define TLAN_DEBUG_RX 0x0004
+#define TLAN_DEBUG_LIST 0x0008
+#define TLAN_DEBUG_PROBE 0x0010
+
+#define TX_TIMEOUT (10*HZ) /* We need time for auto-neg */
+#define MAX_TLAN_BOARDS 8 /* Max number of boards installed
+ at a time */
+
+
+ /*****************************************************************
+ * Device Identification Definitions
+ *
+ ****************************************************************/
+
+#define PCI_DEVICE_ID_NETELLIGENT_10_T2 0xB012
+#define PCI_DEVICE_ID_NETELLIGENT_10_100_WS_5100 0xB030
+#ifndef PCI_DEVICE_ID_OLICOM_OC2183
+#define PCI_DEVICE_ID_OLICOM_OC2183 0x0013
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2325
+#define PCI_DEVICE_ID_OLICOM_OC2325 0x0012
+#endif
+#ifndef PCI_DEVICE_ID_OLICOM_OC2326
+#define PCI_DEVICE_ID_OLICOM_OC2326 0x0014
+#endif
+
+struct tlan_adapter_entry {
+ u16 vendor_id;
+ u16 device_id;
+ char *device_label;
+ u32 flags;
+ u16 addr_ofs;
+};
+
+#define TLAN_ADAPTER_NONE 0x00000000
+#define TLAN_ADAPTER_UNMANAGED_PHY 0x00000001
+#define TLAN_ADAPTER_BIT_RATE_PHY 0x00000002
+#define TLAN_ADAPTER_USE_INTERN_10 0x00000004
+#define TLAN_ADAPTER_ACTIVITY_LED 0x00000008
+
+#define TLAN_SPEED_DEFAULT 0
+#define TLAN_SPEED_10 10
+#define TLAN_SPEED_100 100
+
+#define TLAN_DUPLEX_DEFAULT 0
+#define TLAN_DUPLEX_HALF 1
+#define TLAN_DUPLEX_FULL 2
+
+
+
+ /*****************************************************************
+ * EISA Definitions
+ *
+ ****************************************************************/
+
+#define EISA_ID 0xc80 /* EISA ID Registers */
+#define EISA_ID0 0xc80 /* EISA ID Register 0 */
+#define EISA_ID1 0xc81 /* EISA ID Register 1 */
+#define EISA_ID2 0xc82 /* EISA ID Register 2 */
+#define EISA_ID3 0xc83 /* EISA ID Register 3 */
+#define EISA_CR 0xc84 /* EISA Control Register */
+#define EISA_REG0 0xc88 /* EISA Configuration Register 0 */
+#define EISA_REG1 0xc89 /* EISA Configuration Register 1 */
+#define EISA_REG2 0xc8a /* EISA Configuration Register 2 */
+#define EISA_REG3 0xc8f /* EISA Configuration Register 3 */
+#define EISA_APROM 0xc90 /* Ethernet Address PROM */
+
+
+
+ /*****************************************************************
+ * Rx/Tx List Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_BUFFERS_PER_LIST 10
+#define TLAN_LAST_BUFFER 0x80000000
+#define TLAN_CSTAT_UNUSED 0x8000
+#define TLAN_CSTAT_FRM_CMP 0x4000
+#define TLAN_CSTAT_READY 0x3000
+#define TLAN_CSTAT_EOC 0x0800
+#define TLAN_CSTAT_RX_ERROR 0x0400
+#define TLAN_CSTAT_PASS_CRC 0x0200
+#define TLAN_CSTAT_DP_PR 0x0100
+
+
+struct tlan_buffer {
+ u32 count;
+ u32 address;
+};
+
+
+struct tlan_list {
+ u32 forward;
+ u16 c_stat;
+ u16 frame_size;
+ struct tlan_buffer buffer[TLAN_BUFFERS_PER_LIST];
+};
+
+
+typedef u8 TLanBuffer[TLAN_MAX_FRAME_SIZE];
+
+
+
+
+ /*****************************************************************
+ * PHY definitions
+ *
+ ****************************************************************/
+
+#define TLAN_PHY_MAX_ADDR 0x1F
+#define TLAN_PHY_NONE 0x20
+
+
+
+
+ /*****************************************************************
+ * TLAN Private Information Structure
+ *
+ ****************************************************************/
+
+struct tlan_priv {
+ struct net_device *next_device;
+ struct pci_dev *pci_dev;
+ struct net_device *dev;
+ void *dma_storage;
+ dma_addr_t dma_storage_dma;
+ unsigned int dma_size;
+ u8 *pad_buffer;
+ struct tlan_list *rx_list;
+ dma_addr_t rx_list_dma;
+ u8 *rx_buffer;
+ dma_addr_t rx_buffer_dma;
+ u32 rx_head;
+ u32 rx_tail;
+ u32 rx_eoc_count;
+ struct tlan_list *tx_list;
+ dma_addr_t tx_list_dma;
+ u8 *tx_buffer;
+ dma_addr_t tx_buffer_dma;
+ u32 tx_head;
+ u32 tx_in_progress;
+ u32 tx_tail;
+ u32 tx_busy_count;
+ u32 phy_online;
+ u32 timer_set_at;
+ u32 timer_type;
+ struct timer_list timer;
+ struct timer_list media_timer;
+ struct board *adapter;
+ u32 adapter_rev;
+ u32 aui;
+ u32 debug;
+ u32 duplex;
+ u32 phy[2];
+ u32 phy_num;
+ u32 speed;
+ u8 tlan_rev;
+ u8 tlan_full_duplex;
+ spinlock_t lock;
+ struct work_struct tlan_tqueue;
+};
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Timer Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_TIMER_ACTIVITY 2
+#define TLAN_TIMER_PHY_PDOWN 3
+#define TLAN_TIMER_PHY_PUP 4
+#define TLAN_TIMER_PHY_RESET 5
+#define TLAN_TIMER_PHY_START_LINK 6
+#define TLAN_TIMER_PHY_FINISH_AN 7
+#define TLAN_TIMER_FINISH_RESET 8
+
+#define TLAN_TIMER_ACT_DELAY (HZ/10)
+
+
+
+
+ /*****************************************************************
+ * TLan Driver Eeprom Definitions
+ *
+ ****************************************************************/
+
+#define TLAN_EEPROM_ACK 0
+#define TLAN_EEPROM_STOP 1
+
+#define TLAN_EEPROM_SIZE 256
+
+
+
+ /*****************************************************************
+ * Host Register Offsets and Contents
+ *
+ ****************************************************************/
+
+#define TLAN_HOST_CMD 0x00
+#define TLAN_HC_GO 0x80000000
+#define TLAN_HC_STOP 0x40000000
+#define TLAN_HC_ACK 0x20000000
+#define TLAN_HC_CS_MASK 0x1FE00000
+#define TLAN_HC_EOC 0x00100000
+#define TLAN_HC_RT 0x00080000
+#define TLAN_HC_NES 0x00040000
+#define TLAN_HC_AD_RST 0x00008000
+#define TLAN_HC_LD_TMR 0x00004000
+#define TLAN_HC_LD_THR 0x00002000
+#define TLAN_HC_REQ_INT 0x00001000
+#define TLAN_HC_INT_OFF 0x00000800
+#define TLAN_HC_INT_ON 0x00000400
+#define TLAN_HC_AC_MASK 0x000000FF
+#define TLAN_CH_PARM 0x04
+#define TLAN_DIO_ADR 0x08
+#define TLAN_DA_ADR_INC 0x8000
+#define TLAN_DA_RAM_ADR 0x4000
+#define TLAN_HOST_INT 0x0A
+#define TLAN_HI_IV_MASK 0x1FE0
+#define TLAN_HI_IT_MASK 0x001C
+#define TLAN_DIO_DATA 0x0C
+
+
+/* ThunderLAN Internal Register DIO Offsets */
+
+#define TLAN_NET_CMD 0x00
+#define TLAN_NET_CMD_NRESET 0x80
+#define TLAN_NET_CMD_NWRAP 0x40
+#define TLAN_NET_CMD_CSF 0x20
+#define TLAN_NET_CMD_CAF 0x10
+#define TLAN_NET_CMD_NOBRX 0x08
+#define TLAN_NET_CMD_DUPLEX 0x04
+#define TLAN_NET_CMD_TRFRAM 0x02
+#define TLAN_NET_CMD_TXPACE 0x01
+#define TLAN_NET_SIO 0x01
+#define TLAN_NET_SIO_MINTEN 0x80
+#define TLAN_NET_SIO_ECLOK 0x40
+#define TLAN_NET_SIO_ETXEN 0x20
+#define TLAN_NET_SIO_EDATA 0x10
+#define TLAN_NET_SIO_NMRST 0x08
+#define TLAN_NET_SIO_MCLK 0x04
+#define TLAN_NET_SIO_MTXEN 0x02
+#define TLAN_NET_SIO_MDATA 0x01
+#define TLAN_NET_STS 0x02
+#define TLAN_NET_STS_MIRQ 0x80
+#define TLAN_NET_STS_HBEAT 0x40
+#define TLAN_NET_STS_TXSTOP 0x20
+#define TLAN_NET_STS_RXSTOP 0x10
+#define TLAN_NET_STS_RSRVD 0x0F
+#define TLAN_NET_MASK 0x03
+#define TLAN_NET_MASK_MASK7 0x80
+#define TLAN_NET_MASK_MASK6 0x40
+#define TLAN_NET_MASK_MASK5 0x20
+#define TLAN_NET_MASK_MASK4 0x10
+#define TLAN_NET_MASK_RSRVD 0x0F
+#define TLAN_NET_CONFIG 0x04
+#define TLAN_NET_CFG_RCLK 0x8000
+#define TLAN_NET_CFG_TCLK 0x4000
+#define TLAN_NET_CFG_BIT 0x2000
+#define TLAN_NET_CFG_RXCRC 0x1000
+#define TLAN_NET_CFG_PEF 0x0800
+#define TLAN_NET_CFG_1FRAG 0x0400
+#define TLAN_NET_CFG_1CHAN 0x0200
+#define TLAN_NET_CFG_MTEST 0x0100
+#define TLAN_NET_CFG_PHY_EN 0x0080
+#define TLAN_NET_CFG_MSMASK 0x007F
+#define TLAN_MAN_TEST 0x06
+#define TLAN_DEF_VENDOR_ID 0x08
+#define TLAN_DEF_DEVICE_ID 0x0A
+#define TLAN_DEF_REVISION 0x0C
+#define TLAN_DEF_SUBCLASS 0x0D
+#define TLAN_DEF_MIN_LAT 0x0E
+#define TLAN_DEF_MAX_LAT 0x0F
+#define TLAN_AREG_0 0x10
+#define TLAN_AREG_1 0x16
+#define TLAN_AREG_2 0x1C
+#define TLAN_AREG_3 0x22
+#define TLAN_HASH_1 0x28
+#define TLAN_HASH_2 0x2C
+#define TLAN_GOOD_TX_FRMS 0x30
+#define TLAN_TX_UNDERUNS 0x33
+#define TLAN_GOOD_RX_FRMS 0x34
+#define TLAN_RX_OVERRUNS 0x37
+#define TLAN_DEFERRED_TX 0x38
+#define TLAN_CRC_ERRORS 0x3A
+#define TLAN_CODE_ERRORS 0x3B
+#define TLAN_MULTICOL_FRMS 0x3C
+#define TLAN_SINGLECOL_FRMS 0x3E
+#define TLAN_EXCESSCOL_FRMS 0x40
+#define TLAN_LATE_COLS 0x41
+#define TLAN_CARRIER_LOSS 0x42
+#define TLAN_ACOMMIT 0x43
+#define TLAN_LED_REG 0x44
+#define TLAN_LED_ACT 0x10
+#define TLAN_LED_LINK 0x01
+#define TLAN_BSIZE_REG 0x45
+#define TLAN_MAX_RX 0x46
+#define TLAN_INT_DIS 0x48
+#define TLAN_ID_TX_EOC 0x04
+#define TLAN_ID_RX_EOF 0x02
+#define TLAN_ID_RX_EOC 0x01
+
+
+
+/* ThunderLAN Interrupt Codes */
+
+#define TLAN_INT_NUMBER_OF_INTS 8
+
+#define TLAN_INT_NONE 0x0000
+#define TLAN_INT_TX_EOF 0x0001
+#define TLAN_INT_STAT_OVERFLOW 0x0002
+#define TLAN_INT_RX_EOF 0x0003
+#define TLAN_INT_DUMMY 0x0004
+#define TLAN_INT_TX_EOC 0x0005
+#define TLAN_INT_STATUS_CHECK 0x0006
+#define TLAN_INT_RX_EOC 0x0007
+
+
+
+/* ThunderLAN MII Registers */
+
+/* Generic MII/PHY Registers */
+
+#define MII_GEN_CTL 0x00
+#define MII_GC_RESET 0x8000
+#define MII_GC_LOOPBK 0x4000
+#define MII_GC_SPEEDSEL 0x2000
+#define MII_GC_AUTOENB 0x1000
+#define MII_GC_PDOWN 0x0800
+#define MII_GC_ISOLATE 0x0400
+#define MII_GC_AUTORSRT 0x0200
+#define MII_GC_DUPLEX 0x0100
+#define MII_GC_COLTEST 0x0080
+#define MII_GC_RESERVED 0x007F
+#define MII_GEN_STS 0x01
+#define MII_GS_100BT4 0x8000
+#define MII_GS_100BTXFD 0x4000
+#define MII_GS_100BTXHD 0x2000
+#define MII_GS_10BTFD 0x1000
+#define MII_GS_10BTHD 0x0800
+#define MII_GS_RESERVED 0x07C0
+#define MII_GS_AUTOCMPLT 0x0020
+#define MII_GS_RFLT 0x0010
+#define MII_GS_AUTONEG 0x0008
+#define MII_GS_LINK 0x0004
+#define MII_GS_JABBER 0x0002
+#define MII_GS_EXTCAP 0x0001
+#define MII_GEN_ID_HI 0x02
+#define MII_GEN_ID_LO 0x03
+#define MII_GIL_OUI 0xFC00
+#define MII_GIL_MODEL 0x03F0
+#define MII_GIL_REVISION 0x000F
+#define MII_AN_ADV 0x04
+#define MII_AN_LPA 0x05
+#define MII_AN_EXP 0x06
+
+/* ThunderLAN Specific MII/PHY Registers */
+
+#define TLAN_TLPHY_ID 0x10
+#define TLAN_TLPHY_CTL 0x11
+#define TLAN_TC_IGLINK 0x8000
+#define TLAN_TC_SWAPOL 0x4000
+#define TLAN_TC_AUISEL 0x2000
+#define TLAN_TC_SQEEN 0x1000
+#define TLAN_TC_MTEST 0x0800
+#define TLAN_TC_RESERVED 0x07F8
+#define TLAN_TC_NFEW 0x0004
+#define TLAN_TC_INTEN 0x0002
+#define TLAN_TC_TINT 0x0001
+#define TLAN_TLPHY_STS 0x12
+#define TLAN_TS_MINT 0x8000
+#define TLAN_TS_PHOK 0x4000
+#define TLAN_TS_POLOK 0x2000
+#define TLAN_TS_TPENERGY 0x1000
+#define TLAN_TS_RESERVED 0x0FFF
+#define TLAN_TLPHY_PAR 0x19
+#define TLAN_PHY_CIM_STAT 0x0020
+#define TLAN_PHY_SPEED_100 0x0040
+#define TLAN_PHY_DUPLEX_FULL 0x0080
+#define TLAN_PHY_AN_EN_STAT 0x0400
+
+/* National Semiconductor & Level One PHY IDs */
+#define NAT_SEM_ID1 0x2000
+#define NAT_SEM_ID2 0x5C01
+#define LEVEL1_ID1 0x7810
+#define LEVEL1_ID2 0x0000
+
+#define CIRC_INC(a, b) if (++a >= b) a = 0
+
+/* Routines to access internal registers. */
+
+static inline u8 tlan_dio_read8(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inb((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline u16 tlan_dio_read16(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inw((base_addr + TLAN_DIO_DATA) + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline u32 tlan_dio_read32(u16 base_addr, u16 internal_addr)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ return inl(base_addr + TLAN_DIO_DATA);
+
+}
+
+
+
+
+static inline void tlan_dio_write8(u16 base_addr, u16 internal_addr, u8 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outb(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x3));
+
+}
+
+
+
+
+static inline void tlan_dio_write16(u16 base_addr, u16 internal_addr, u16 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outw(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+
+
+
+static inline void tlan_dio_write32(u16 base_addr, u16 internal_addr, u32 data)
+{
+ outw(internal_addr, base_addr + TLAN_DIO_ADR);
+ outl(data, base_addr + TLAN_DIO_DATA + (internal_addr & 0x2));
+
+}
+
+#define tlan_clear_bit(bit, port) outb_p(inb_p(port) & ~bit, port)
+#define tlan_get_bit(bit, port) ((int) (inb_p(port) & bit))
+#define tlan_set_bit(bit, port) outb_p(inb_p(port) | bit, port)
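+
+/* Example (illustrative sketch): the DIO accessors above are the usual
+ * way to touch ThunderLAN internal registers: the register offset is
+ * written to TLAN_DIO_ADR and the value moves through the TLAN_DIO_DATA
+ * window.  "dev" is assumed to be a net_device whose base_addr points at
+ * a TLAN chip.
+ *
+ *	u8 sts;
+ *
+ *	sts = tlan_dio_read8(dev->base_addr, TLAN_NET_STS);
+ *	tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
+ */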
+
+/*
+ * Given 6 bytes, view them as 8 6-bit numbers and return the XOR of those.
+ * The code below is about seven times as fast as the original code.
+ *
+ * The original code was:
+ *
+ * u32 xor(u32 a, u32 b) { return ((a && !b ) || (! a && b )); }
+ *
+ * #define XOR8(a, b, c, d, e, f, g, h) \
+ * xor(a, xor(b, xor(c, xor(d, xor(e, xor(f, xor(g, h)) ) ) ) ) )
+ * #define DA(a, bit) (( (u8) a[bit/8] ) & ( (u8) (1 << bit%8)) )
+ *
+ * hash = XOR8(DA(a,0), DA(a, 6), DA(a,12), DA(a,18), DA(a,24),
+ * DA(a,30), DA(a,36), DA(a,42));
+ * hash |= XOR8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25),
+ * DA(a,31), DA(a,37), DA(a,43)) << 1;
+ * hash |= XOR8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26),
+ * DA(a,32), DA(a,38), DA(a,44)) << 2;
+ * hash |= XOR8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27),
+ * DA(a,33), DA(a,39), DA(a,45)) << 3;
+ * hash |= XOR8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28),
+ * DA(a,34), DA(a,40), DA(a,46)) << 4;
+ * hash |= XOR8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29),
+ * DA(a,35), DA(a,41), DA(a,47)) << 5;
+ *
+ */
+static inline u32 tlan_hash_func(const u8 *a)
+{
+ u8 hash;
+
+ hash = (a[0]^a[3]); /* & 077 */
+ hash ^= ((a[0]^a[3])>>6); /* & 003 */
+ hash ^= ((a[1]^a[4])<<2); /* & 074 */
+ hash ^= ((a[1]^a[4])>>4); /* & 017 */
+ hash ^= ((a[2]^a[5])<<4); /* & 060 */
+ hash ^= ((a[2]^a[5])>>2); /* & 077 */
+
+ return hash & 077;
+}
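+
+/* Example (illustrative sketch): how the hash above would typically be
+ * used to set a multicast filter bit.  The 6-bit result selects one of
+ * the 64 bits spread across the TLAN_HASH_1/TLAN_HASH_2 registers;
+ * "dev" and "addr" are assumed to be a net_device and a 6-byte multicast
+ * address.
+ *
+ *	u32 bit = tlan_hash_func(addr);
+ *	u16 offset = (bit < 32) ? TLAN_HASH_1 : TLAN_HASH_2;
+ *
+ *	tlan_dio_write32(dev->base_addr, offset,
+ *			 tlan_dio_read32(dev->base_addr, offset) |
+ *			 (1u << (bit & 0x1f)));
+ */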
+#endif